/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "attribs.h"
#include "debug.h"
#include "calls.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
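
/* Illustrative note (not part of the original sources; N below is a
   made-up read-only decl): inside a function, a non-constant size such
   as one built with size_binop (MULT_EXPR, n, size_int (4)) comes back
   wrapped in a SAVE_EXPR so it is evaluated only once; the same call at
   the global binding level returns the expression unchanged, and a size
   containing a PLACEHOLDER_EXPR is routed to self_referential_size
   instead.  */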

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}
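
/* For example, a size expression such as (PLACEHOLDER_EXPR).n, i.e. a
   COMPONENT_REF whose innermost base object is a PLACEHOLDER_EXPR,
   satisfies this predicate; a COMPONENT_REF rooted at an ordinary
   VAR_DECL does not.  (Illustration only; the field name N is made
   up.)  */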

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
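
/* Sketch of the transformation above (illustration only, with a made-up
   self-referential size (PLACEHOLDER_EXPR).n + (PLACEHOLDER_EXPR).m):
   a "const" function along the lines of

     sizetype SZ0 (sizetype p0, sizetype p1) { return p0 + p1; }

   is queued on SIZE_FUNCTIONS and the size expression is rewritten as
   SZ0 ((PLACEHOLDER_EXPR).n, (PLACEHOLDER_EXPR).m); the placeholders,
   now confined to the argument list, can later be instantiated per
   object with SUBSTITUTE_PLACEHOLDER_IN_EXPR.  */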

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}

/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_eq (GET_MODE_PRECISION (mode), size))
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_eq (int_n_data[i].bitsize, size)
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
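
/* Example (illustration only; mode widths assume a typical target where
   QImode/HImode/SImode are 8/16/32 bits):

     mode_for_size (32, MODE_INT, 0)  -> SImode
     mode_for_size (24, MODE_INT, 0)  -> empty, unless the target defines
                                         a 24-bit integer mode
     mode_for_size (256, MODE_INT, 1) -> empty whenever
                                         MAX_FIXED_MODE_SIZE < 256  */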

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_ge (GET_MODE_PRECISION (mode), size))
      break;

  gcc_assert (mode != VOIDmode);

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_ge (int_n_data[i].bitsize, size)
          && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  return mode;
}
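
/* Example (illustration only, same typical 8/16/32-bit layout as above):
   smallest_mode_for_size (17, MODE_INT) returns SImode, since HImode
   offers only 16 bits of precision and SImode is the next wider mode.  */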

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_OPAQUE:
      return opt_scalar_int_mode ();

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
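
/* Example (illustration only): on a target where SFmode is a 32-bit
   float and SImode a 32-bit integer, int_mode_for_mode (SFmode) yields
   SImode; int_mode_for_mode (BLKmode) yields an empty
   opt_scalar_int_mode.  */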

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}

/* Find a mode that can be used for efficient bitwise operations on SIZE
   bits, if one exists.  */

opt_machine_mode
bitwise_mode_for_size (poly_uint64 size)
{
  if (known_le (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return mode_for_size (size, MODE_INT, true);

  machine_mode mode, ret = VOIDmode;
  FOR_EACH_MODE_FROM (mode, MIN_MODE_VECTOR_INT)
    if (known_eq (GET_MODE_BITSIZE (mode), size)
        && (ret == VOIDmode || GET_MODE_INNER (mode) == QImode)
        && have_regs_of_mode[mode]
        && targetm.vector_mode_supported_p (mode))
      {
        if (GET_MODE_INNER (mode) == QImode)
          return mode;
        else if (ret == VOIDmode)
          ret = mode;
      }
  if (ret != VOIDmode)
    return ret;
  return opt_machine_mode ();
}

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Only check the broader vector_mode_supported_any_target_p here.
     We'll filter through target-specific availability and
     vector_mode_supported_p later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
        && GET_MODE_INNER (mode) == innermode
        && targetm.vector_mode_supported_any_target_p (mode))
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}
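
/* Example (illustration only, assuming the target defines V4SImode):
   mode_for_vector (SImode, 4) returns V4SImode when
   vector_mode_supported_any_target_p holds for it; failing that, since
   SImode is MODE_INT, the fallback may return a 128-bit scalar integer
   mode such as TImode, if one exists and has registers.  */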

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on elements of mode ELEMENT_MODE, return the vector mode
   it should use for those elements.  If NUNITS is nonzero, ensure that
   the mode has exactly NUNITS elements, otherwise pick whichever vector
   size pairs the most naturally with VECTOR_MODE; this may mean choosing
   a mode with a different size and/or number of elements, depending on
   what the target prefers.  Return an empty opt_machine_mode if there
   is no supported vector mode with the required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_vector_mode (machine_mode vector_mode, scalar_mode element_mode,
                     poly_uint64 nunits)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  return targetm.vectorize.related_mode (vector_mode, element_mode, nunits);
}

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on integer vectors with the same element size and number
   of elements, return the vector mode it should use.  Return an empty
   opt_machine_mode if there is no supported vector mode with the
   required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_int_vector_mode (machine_mode vector_mode)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  scalar_int_mode int_mode;
  if (int_mode_for_mode (GET_MODE_INNER (vector_mode)).exists (&int_mode))
    return related_vector_mode (vector_mode, int_mode,
                                GET_MODE_NUNITS (vector_mode));
  return opt_machine_mode ();
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
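
/* Example (illustration only): if mode_base_align[SImode] is 4 bytes,
   get_mode_alignment (SImode) yields MIN (BIGGEST_ALIGNMENT, 32) bits.  */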

/* Return the natural mode of an array, given that it is SIZE bits in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
        return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
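
/* Example (illustration only, assuming 32-bit int and no target-defined
   array modes): for 'int a[4]', SIZE is 128 bits and ELEM_SIZE 32 bits,
   so NUM_ELEMS is 4 and the result is that of
   mode_for_size_tree (128, MODE_INT, true): a 128-bit integer mode such
   as TImode if available within MAX_FIXED_MODE_SIZE, else BLKmode.  */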

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
        {
          /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
             as if PTRDIFF_MAX had been specified, with the value
             being that on the target rather than the host.  */
          unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
          if (max_size == HOST_WIDE_INT_MAX)
            max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

          if (compare_tree_int (size, max_size) > 0)
            warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
                     "maximum object size %wu",
                     decl, size, max_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
                            TYPE_PRECISION (bitsizetype),
                            TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}

/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, bits_from_bytes (offset),
                                 bitsize_unit_node));
}
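
/* Example: with BITS_PER_UNIT == 8, a byte offset of 2 combined with a
   bit position of 3 yields 2 * 8 + 3 == 19 bits from the start of the
   record.  */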

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
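
/* Example: with BITS_PER_UNIT == 8, a byte offset of 2 and a bit
   position of 19 combine to 2 + 19 / 8 == 4 bytes, the residual
   3 bits being truncated away.  */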

/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  tree toff_align = bitsize_int (off_align);
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype, TREE_OPERAND (pos, 0)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype,
                                           size_binop (FLOOR_DIV_EXPR, pos,
                                                       toff_align)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}
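
/* Example: with BITS_PER_UNIT == 8, splitting POS == 19 at
   OFF_ALIGN == 8 stores 2 (bytes) in *POFFSET and 3 in *PBITPOS,
   since 19 == 2 * 8 + 3.  */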

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree offset, bitpos;
      pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
      *poffset = size_binop (PLUS_EXPR, *poffset, offset);
      *pbitpos = bitpos;
    }
}

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only one that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.)  */
      if (!is_bitfield
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          if (!is_bitfield && DECL_PACKED (field))
            type_align = desired_align;
          else
            type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Issue a warning if the record alignment, RECORD_ALIGN, is less than
   the field alignment of FIELD or if FIELD isn't aligned.  */

static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  unsigned int warn_if_not_align = 0;

  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
        warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
        opt_w = OPT_Wif_not_aligned;
    }

  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
             record_align, context, warn_if_not_align);

  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
        warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
                 field, off, context, warn_if_not_align);
      else
        warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
                 field, off, context, warn_if_not_align);
    }
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}

/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > tree_to_uhwi (TYPE_SIZE (type)) / align);
}
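
/* Example: a 6-bit field at byte 3, bit 5 (bit 29 overall) with
   ALIGN == 32 and a 32-bit TYPE spans bits 29..34 and so crosses an
   alignment boundary: (29 % 32 + 6 + 31) / 32 == 2 units, which exceeds
   the 32 / 32 == 1 unit occupied by the type itself, so this returns
   nonzero.  */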
1272 | |
1273 | /* RLI contains information about the layout of a RECORD_TYPE. FIELD |
1274 | is a FIELD_DECL to be added after those fields already present in |
1275 | T. (FIELD is not actually added to the TYPE_FIELDS list here; |
1276 | callers that desire that behavior must manually perform that step.) */ |
1277 | |
1278 | void |
1279 | place_field (record_layout_info rli, tree field) |
1280 | { |
1281 | /* The alignment required for FIELD. */ |
1282 | unsigned int desired_align; |
1283 | /* The alignment FIELD would have if we just dropped it into the |
1284 | record as it presently stands. */ |
1285 | unsigned int known_align; |
1286 | unsigned int actual_align; |
1287 | /* The type of this field. */ |
1288 | tree type = TREE_TYPE (field); |
1289 | |
1290 | gcc_assert (TREE_CODE (field) != ERROR_MARK); |
1291 | |
1292 | /* If FIELD is static, then treat it like a separate variable, not |
1293 | really like a structure field. If it is a FUNCTION_DECL, it's a |
1294 | method. In both cases, all we do is lay out the decl, and we do |
1295 | it *after* the record is laid out. */ |
1296 | if (VAR_P (field)) |
1297 | { |
1298 | vec_safe_push (v&: rli->pending_statics, obj: field); |
1299 | return; |
1300 | } |
1301 | |
1302 | /* Enumerators and enum types which are local to this class need not |
1303 | be laid out. Likewise for initialized constant fields. */ |
1304 | else if (TREE_CODE (field) != FIELD_DECL) |
1305 | return; |
1306 | |
1307 | /* Unions are laid out very differently than records, so split |
1308 | that code off to another function. */ |
1309 | else if (TREE_CODE (rli->t) != RECORD_TYPE) |
1310 | { |
1311 | place_union_field (rli, field); |
1312 | return; |
1313 | } |
1314 | |
1315 | else if (TREE_CODE (type) == ERROR_MARK) |
1316 | { |
1317 | /* Place this field at the current allocation position, so we |
1318 | maintain monotonicity. */ |
1319 | DECL_FIELD_OFFSET (field) = rli->offset; |
1320 | DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; |
1321 | SET_DECL_OFFSET_ALIGN (field, rli->offset_align); |
1322 | handle_warn_if_not_align (field, record_align: rli->record_align); |
1323 | return; |
1324 | } |
1325 | |
1326 | if (AGGREGATE_TYPE_P (type) |
1327 | && TYPE_TYPELESS_STORAGE (type)) |
1328 | TYPE_TYPELESS_STORAGE (rli->t) = 1; |
1329 | |
1330 | /* Work out the known alignment so far. Note that A & (-A) is the |
1331 | value of the least-significant bit in A that is one. */ |
1332 | if (! integer_zerop (rli->bitpos)) |
1333 | known_align = least_bit_hwi (x: tree_to_uhwi (rli->bitpos)); |
1334 | else if (integer_zerop (rli->offset)) |
1335 | known_align = 0; |
1336 | else if (tree_fits_uhwi_p (rli->offset)) |
1337 | known_align = (BITS_PER_UNIT |
1338 | * least_bit_hwi (x: tree_to_uhwi (rli->offset))); |
1339 | else |
1340 | known_align = rli->offset_align; |
1341 | |
1342 | desired_align = update_alignment_for_field (rli, field, known_align); |
1343 | if (known_align == 0) |
1344 | known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); |
1345 | |
1346 | if (warn_packed && DECL_PACKED (field)) |
1347 | { |
1348 | if (known_align >= TYPE_ALIGN (type)) |
1349 | { |
1350 | if (TYPE_ALIGN (type) > desired_align) |
1351 | { |
1352 | if (STRICT_ALIGNMENT) |
1353 | warning (OPT_Wattributes, "packed attribute causes " |
1354 | "inefficient alignment for %q+D" , field); |
1355 | /* Don't warn if DECL_PACKED was set by the type. */ |
1356 | else if (!TYPE_PACKED (rli->t)) |
1357 | warning (OPT_Wattributes, "packed attribute is " |
1358 | "unnecessary for %q+D" , field); |
1359 | } |
1360 | } |
1361 | else |
1362 | rli->packed_maybe_necessary = 1; |
1363 | } |
1364 | |
1365 | /* Does this field automatically have alignment it needs by virtue |
1366 | of the fields that precede it and the record's own alignment? */ |
1367 | if (known_align < desired_align |
1368 | && (! targetm.ms_bitfield_layout_p (rli->t) |
1369 | || rli->prev_field == NULL)) |
1370 | { |
1371 | /* No, we need to skip space before this field. |
1372 | Bump the cumulative size to multiple of field alignment. */ |
1373 | |
1374 | if (!targetm.ms_bitfield_layout_p (rli->t) |
1375 | && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION |
1376 | && !TYPE_ARTIFICIAL (rli->t)) |
1377 | warning (OPT_Wpadded, "padding struct to align %q+D" , field); |
1378 | |
1379 | /* If the alignment is still within offset_align, just align |
1380 | the bit position. */ |
1381 | if (desired_align < rli->offset_align) |
1382 | rli->bitpos = round_up (rli->bitpos, desired_align); |
1383 | else |
1384 | { |
1385 | /* First adjust OFFSET by the partial bits, then align. */ |
1386 | rli->offset |
1387 | = size_binop (PLUS_EXPR, rli->offset, |
1388 | fold_convert (sizetype, |
1389 | size_binop (CEIL_DIV_EXPR, rli->bitpos, |
1390 | bitsize_unit_node))); |
1391 | rli->bitpos = bitsize_zero_node; |
1392 | |
1393 | rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); |
1394 | } |
1395 | |
1396 | if (! TREE_CONSTANT (rli->offset)) |
1397 | rli->offset_align = desired_align; |
1398 | } |
1399 | |
1400 | /* Handle compatibility with PCC. Note that if the record has any |
1401 | variable-sized fields, we need not worry about compatibility. */ |
1402 | if (PCC_BITFIELD_TYPE_MATTERS |
1403 | && ! targetm.ms_bitfield_layout_p (rli->t) |
1404 | && TREE_CODE (field) == FIELD_DECL |
1405 | && type != error_mark_node |
1406 | && DECL_BIT_FIELD (field) |
1407 | && (! DECL_PACKED (field) |
1408 | /* Enter for these packed fields only to issue a warning. */ |
1409 | || TYPE_ALIGN (type) <= BITS_PER_UNIT) |
1410 | && maximum_field_alignment == 0 |
1411 | && ! integer_zerop (DECL_SIZE (field)) |
1412 | && tree_fits_uhwi_p (DECL_SIZE (field)) |
1413 | && tree_fits_uhwi_p (rli->offset) |
1414 | && tree_fits_uhwi_p (TYPE_SIZE (type))) |
1415 | { |
1416 | unsigned int type_align = TYPE_ALIGN (type); |
1417 | tree dsize = DECL_SIZE (field); |
1418 | HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1419 | HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); |
1420 | HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); |
1421 | |
1422 | #ifdef ADJUST_FIELD_ALIGN |
1423 | if (! TYPE_USER_ALIGN (type)) |
1424 | type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
1425 | #endif |
1426 | |
1427 | /* A bit field may not span more units of alignment of its type |
1428 | than its type itself. Advance to next boundary if necessary. */ |
1429 | if (excess_unit_span (byte_offset: offset, bit_offset, size: field_size, align: type_align, type)) |
1430 | { |
1431 | if (DECL_PACKED (field)) |
1432 | { |
1433 | if (warn_packed_bitfield_compat == 1) |
1434 | inform |
1435 | (input_location, |
1436 | "offset of packed bit-field %qD has changed in GCC 4.4" , |
1437 | field); |
1438 | } |
1439 | else |
1440 | rli->bitpos = round_up (rli->bitpos, type_align); |
1441 | } |
1442 | |
1443 | if (! DECL_PACKED (field)) |
1444 | TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); |
1445 | |
1446 | SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, |
1447 | TYPE_WARN_IF_NOT_ALIGN (type)); |
1448 | } |
1449 | |
1450 | #ifdef BITFIELD_NBYTES_LIMITED |
1451 | if (BITFIELD_NBYTES_LIMITED |
1452 | && ! targetm.ms_bitfield_layout_p (rli->t) |
1453 | && TREE_CODE (field) == FIELD_DECL |
1454 | && type != error_mark_node |
1455 | && DECL_BIT_FIELD_TYPE (field) |
1456 | && ! DECL_PACKED (field) |
1457 | && ! integer_zerop (DECL_SIZE (field)) |
1458 | && tree_fits_uhwi_p (DECL_SIZE (field)) |
1459 | && tree_fits_uhwi_p (rli->offset) |
1460 | && tree_fits_uhwi_p (TYPE_SIZE (type))) |
1461 | { |
1462 | unsigned int type_align = TYPE_ALIGN (type); |
1463 | tree dsize = DECL_SIZE (field); |
1464 | HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1465 | HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); |
1466 | HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); |
1467 | |
1468 | #ifdef ADJUST_FIELD_ALIGN |
1469 | if (! TYPE_USER_ALIGN (type)) |
1470 | type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
1471 | #endif |
1472 | |
1473 | if (maximum_field_alignment != 0) |
1474 | type_align = MIN (type_align, maximum_field_alignment); |
1475 | /* ??? This test is opposite the test in the containing if |
1476 | statement, so this code is unreachable currently. */ |
1477 | else if (DECL_PACKED (field)) |
1478 | type_align = MIN (type_align, BITS_PER_UNIT); |
1479 | |
1480 | /* A bit field may not span the unit of alignment of its type. |
1481 | Advance to next boundary if necessary. */ |
1482 | if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) |
1483 | rli->bitpos = round_up (rli->bitpos, type_align); |
1484 | |
1485 | TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); |
1486 | SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, |
1487 | TYPE_WARN_IF_NOT_ALIGN (type)); |
1488 | } |
1489 | #endif |
1490 | |
1491 | /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details. |
1492 | A subtlety: |
1493 | When a bit field is inserted into a packed record, the whole |
1494 | size of the underlying type is used by one or more same-size |
adjacent bitfields. (That is, if it's long:3, 32 bits is
1496 | used in the record, and any additional adjacent long bitfields are |
1497 | packed into the same chunk of 32 bits. However, if the size |
1498 | changes, a new field of that size is allocated.) In an unpacked |
1499 | record, this is the same as using alignment, but not equivalent |
1500 | when packing. |
1501 | |
Note: for compatibility, we use the type size, not the type alignment,
to determine alignment, since that matches the documentation.  */
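
/* A worked example (illustrative, assuming 64-bit long and 32-bit int):
under MS layout, in

struct s { long a : 3; long b : 3; int c : 3; };

A and B share one 64-bit chunk because their type sizes match, while C
starts a new 32-bit chunk because the type size changes, even though
all three bitfields would fit in a single int.  */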
1504 | |
1505 | if (targetm.ms_bitfield_layout_p (rli->t)) |
1506 | { |
1507 | tree prev_saved = rli->prev_field; |
1508 | tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL; |
1509 | |
/* The previous field, if it exists, is necessarily a bitfield.  */
1511 | if (rli->prev_field) |
1512 | { |
1513 | bool realign_p = known_align < desired_align; |
1514 | |
1515 | /* If both are bitfields, nonzero, and the same size, this is |
1516 | the middle of a run. Zero declared size fields are special |
1517 | and handled as "end of run". (Note: it's nonzero declared |
1518 | size, but equal type sizes!) (Since we know that both |
1519 | the current and previous fields are bitfields by the |
1520 | time we check it, DECL_SIZE must be present for both.) */ |
1521 | if (DECL_BIT_FIELD_TYPE (field) |
1522 | && !integer_zerop (DECL_SIZE (field)) |
1523 | && !integer_zerop (DECL_SIZE (rli->prev_field)) |
1524 | && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) |
1525 | && tree_fits_uhwi_p (TYPE_SIZE (type)) |
1526 | && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) |
1527 | { |
1528 | /* We're in the middle of a run of equal type size fields; make |
1529 | sure we realign if we run out of bits. (Not decl size, |
1530 | type size!) */ |
1531 | HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); |
1532 | |
1533 | if (rli->remaining_in_alignment < bitsize) |
1534 | { |
1535 | HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); |
1536 | |
1537 | /* out of bits; bump up to next 'word'. */ |
1538 | rli->bitpos |
1539 | = size_binop (PLUS_EXPR, rli->bitpos, |
1540 | bitsize_int (rli->remaining_in_alignment)); |
1541 | rli->prev_field = field; |
1542 | if (typesize < bitsize) |
1543 | rli->remaining_in_alignment = 0; |
1544 | else |
1545 | rli->remaining_in_alignment = typesize - bitsize; |
1546 | } |
1547 | else |
1548 | { |
1549 | rli->remaining_in_alignment -= bitsize; |
1550 | realign_p = false; |
1551 | } |
1552 | } |
1553 | else |
1554 | { |
1555 | /* End of a run: if leaving a run of bitfields of the same type |
1556 | size, we have to "use up" the rest of the bits of the type |
1557 | size. |
1558 | |
1559 | Compute the new position as the sum of the size for the prior |
1560 | type and where we first started working on that type. |
1561 | Note: since the beginning of the field was aligned then |
1562 | of course the end will be too. No round needed. */ |
1563 | |
1564 | if (!integer_zerop (DECL_SIZE (rli->prev_field))) |
1565 | { |
1566 | rli->bitpos |
1567 | = size_binop (PLUS_EXPR, rli->bitpos, |
1568 | bitsize_int (rli->remaining_in_alignment)); |
1569 | } |
1570 | else |
1571 | /* We "use up" size zero fields; the code below should behave |
1572 | as if the prior field was not a bitfield. */ |
1573 | prev_saved = NULL; |
1574 | |
1575 | /* Cause a new bitfield to be captured, either this time (if |
1576 | currently a bitfield) or next time we see one. */ |
1577 | if (!DECL_BIT_FIELD_TYPE (field) |
1578 | || integer_zerop (DECL_SIZE (field))) |
1579 | rli->prev_field = NULL; |
1580 | } |
1581 | |
1582 | /* Does this field automatically have alignment it needs by virtue |
1583 | of the fields that precede it and the record's own alignment? */ |
1584 | if (realign_p) |
1585 | { |
1586 | /* If the alignment is still within offset_align, just align |
1587 | the bit position. */ |
1588 | if (desired_align < rli->offset_align) |
1589 | rli->bitpos = round_up (rli->bitpos, desired_align); |
1590 | else |
1591 | { |
1592 | /* First adjust OFFSET by the partial bits, then align. */ |
1593 | tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos, |
1594 | bitsize_unit_node); |
1595 | rli->offset = size_binop (PLUS_EXPR, rli->offset, |
1596 | fold_convert (sizetype, d)); |
1597 | rli->bitpos = bitsize_zero_node; |
1598 | |
1599 | rli->offset = round_up (rli->offset, |
1600 | desired_align / BITS_PER_UNIT); |
1601 | } |
1602 | |
1603 | if (! TREE_CONSTANT (rli->offset)) |
1604 | rli->offset_align = desired_align; |
1605 | } |
1606 | |
1607 | normalize_rli (rli); |
1608 | } |
1609 | |
1610 | /* If we're starting a new run of same type size bitfields |
1611 | (or a run of non-bitfields), set up the "first of the run" |
1612 | fields. |
1613 | |
1614 | That is, if the current field is not a bitfield, or if there |
was a prior bitfield and the type sizes differ, or if there wasn't
a prior bitfield and the size of the current field is nonzero.
1617 | |
1618 | Note: we must be sure to test ONLY the type size if there was |
1619 | a prior bitfield and ONLY for the current field being zero if |
1620 | there wasn't. */ |
1621 | |
1622 | if (!DECL_BIT_FIELD_TYPE (field) |
1623 | || (prev_saved != NULL |
1624 | ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)) |
1625 | : !integer_zerop (DECL_SIZE (field)))) |
1626 | { |
1627 | /* Never smaller than a byte for compatibility. */ |
1628 | unsigned int type_align = BITS_PER_UNIT; |
1629 | |
1630 | /* (When not a bitfield), we could be seeing a flex array (with |
1631 | no DECL_SIZE). Since we won't be using remaining_in_alignment |
1632 | until we see a bitfield (and come by here again) we just skip |
1633 | calculating it. */ |
1634 | if (DECL_SIZE (field) != NULL |
1635 | && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) |
1636 | && tree_fits_uhwi_p (DECL_SIZE (field))) |
1637 | { |
1638 | unsigned HOST_WIDE_INT bitsize |
1639 | = tree_to_uhwi (DECL_SIZE (field)); |
1640 | unsigned HOST_WIDE_INT typesize |
1641 | = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); |
1642 | |
1643 | if (typesize < bitsize) |
1644 | rli->remaining_in_alignment = 0; |
1645 | else |
1646 | rli->remaining_in_alignment = typesize - bitsize; |
1647 | } |
1648 | |
1649 | /* Now align (conventionally) for the new type. */ |
1650 | if (! DECL_PACKED (field)) |
1651 | type_align = TYPE_ALIGN (TREE_TYPE (field)); |
1652 | |
1653 | if (maximum_field_alignment != 0) |
1654 | type_align = MIN (type_align, maximum_field_alignment); |
1655 | |
1656 | rli->bitpos = round_up (rli->bitpos, type_align); |
1657 | |
1658 | /* If we really aligned, don't allow subsequent bitfields |
1659 | to undo that. */ |
1660 | rli->prev_field = NULL; |
1661 | } |
1662 | } |
1663 | |
1664 | /* Offset so far becomes the position of this field after normalizing. */ |
1665 | normalize_rli (rli); |
1666 | DECL_FIELD_OFFSET (field) = rli->offset; |
1667 | DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; |
1668 | SET_DECL_OFFSET_ALIGN (field, rli->offset_align); |
handle_warn_if_not_align (field, rli->record_align);
1670 | |
1671 | /* Evaluate nonconstant offsets only once, either now or as soon as safe. */ |
1672 | if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST) |
1673 | DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field)); |
1674 | |
1675 | /* If this field ended up more aligned than we thought it would be (we |
1676 | approximate this by seeing if its position changed), lay out the field |
1677 | again; perhaps we can use an integral mode for it now. */ |
1678 | if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) |
actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1680 | else if (integer_zerop (DECL_FIELD_OFFSET (field))) |
1681 | actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); |
1682 | else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) |
1683 | actual_align = (BITS_PER_UNIT |
* least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1685 | else |
1686 | actual_align = DECL_OFFSET_ALIGN (field); |
/* ACTUAL_ALIGN is still the actual alignment *within the record*.
Store / extract bit field operations will check the alignment of the
record against the mode of bit fields.  */
1690 | |
1691 | if (known_align != actual_align) |
layout_decl (field, actual_align);
1693 | |
1694 | if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field)) |
1695 | rli->prev_field = field; |
1696 | |
1697 | /* Now add size of this field to the size of the record. If the size is |
1698 | not constant, treat the field as being a multiple of bytes and just |
1699 | adjust the offset, resetting the bit position. Otherwise, apportion the |
1700 | size amongst the bit position and offset. First handle the case of an |
1701 | unspecified size, which can happen when we have an invalid nested struct |
1702 | definition, such as struct j { struct j { int i; } }. The error message |
1703 | is printed in finish_struct. */ |
1704 | if (DECL_SIZE (field) == 0) |
1705 | /* Do nothing. */; |
1706 | else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST |
1707 | || TREE_OVERFLOW (DECL_SIZE (field))) |
1708 | { |
1709 | rli->offset |
1710 | = size_binop (PLUS_EXPR, rli->offset, |
1711 | fold_convert (sizetype, |
1712 | size_binop (CEIL_DIV_EXPR, rli->bitpos, |
1713 | bitsize_unit_node))); |
1714 | rli->offset |
1715 | = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); |
1716 | rli->bitpos = bitsize_zero_node; |
1717 | rli->offset_align = MIN (rli->offset_align, desired_align); |
1718 | |
1719 | if (!multiple_of_p (bitsizetype, DECL_SIZE (field), |
1720 | bitsize_int (rli->offset_align))) |
1721 | { |
1722 | tree type = strip_array_types (TREE_TYPE (field)); |
1723 | /* The above adjusts offset_align just based on the start of the |
1724 | field. The field might not have a size that is a multiple of |
1725 | that offset_align though. If the field is an array of fixed |
1726 | sized elements, assume there can be any multiple of those |
1727 | sizes. If it is a variable length aggregate or array of |
1728 | variable length aggregates, assume worst that the end is |
1729 | just BITS_PER_UNIT aligned. */ |
1730 | if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) |
1731 | { |
1732 | if (TREE_INT_CST_LOW (TYPE_SIZE (type))) |
1733 | { |
1734 | unsigned HOST_WIDE_INT sz |
1735 | = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type))); |
1736 | rli->offset_align = MIN (rli->offset_align, sz); |
1737 | } |
1738 | } |
1739 | else |
1740 | rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT); |
1741 | } |
1742 | } |
1743 | else if (targetm.ms_bitfield_layout_p (rli->t)) |
1744 | { |
1745 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); |
1746 | |
1747 | /* If FIELD is the last field and doesn't end at the full length |
1748 | of the type then pad the struct out to the full length of the |
1749 | last type. */ |
1750 | if (DECL_BIT_FIELD_TYPE (field) |
1751 | && !integer_zerop (DECL_SIZE (field))) |
1752 | { |
1753 | /* We have to scan, because non-field DECLS are also here. */ |
1754 | tree probe = field; |
1755 | while ((probe = DECL_CHAIN (probe))) |
1756 | if (TREE_CODE (probe) == FIELD_DECL) |
1757 | break; |
1758 | if (!probe) |
1759 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, |
1760 | bitsize_int (rli->remaining_in_alignment)); |
1761 | } |
1762 | |
1763 | normalize_rli (rli); |
1764 | } |
1765 | else |
1766 | { |
1767 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); |
1768 | normalize_rli (rli); |
1769 | } |
1770 | } |
1771 | |
1772 | /* Assuming that all the fields have been laid out, this function uses |
1773 | RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type |
1774 | indicated by RLI. */ |
1775 | |
1776 | static void |
1777 | finalize_record_size (record_layout_info rli) |
1778 | { |
1779 | tree unpadded_size, unpadded_size_unit; |
1780 | |
1781 | /* Now we want just byte and bit offsets, so set the offset alignment |
1782 | to be a byte and then normalize. */ |
1783 | rli->offset_align = BITS_PER_UNIT; |
1784 | normalize_rli (rli); |
1785 | |
1786 | /* Determine the desired alignment. */ |
1787 | #ifdef ROUND_TYPE_ALIGN |
1788 | SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), |
1789 | rli->record_align)); |
1790 | #else |
1791 | SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align)); |
1792 | #endif |
1793 | |
1794 | /* Compute the size so far. Be sure to allow for extra bits in the |
1795 | size in bytes. We have guaranteed above that it will be no more |
1796 | than a single byte. */ |
1797 | unpadded_size = rli_size_so_far (rli); |
1798 | unpadded_size_unit = rli_size_unit_so_far (rli); |
1799 | if (! integer_zerop (rli->bitpos)) |
1800 | unpadded_size_unit |
1801 | = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); |
1802 | |
1803 | /* Round the size up to be a multiple of the required alignment. */ |
1804 | TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); |
1805 | TYPE_SIZE_UNIT (rli->t) |
1806 | = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); |
1807 | |
1808 | if (TREE_CONSTANT (unpadded_size) |
1809 | && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0 |
1810 | && input_location != BUILTINS_LOCATION |
1811 | && !TYPE_ARTIFICIAL (rli->t)) |
1812 | { |
1813 | tree pad_size |
1814 | = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (rli->t), unpadded_size_unit); |
1815 | warning (OPT_Wpadded, |
1816 | "padding struct size to alignment boundary with %E bytes" , pad_size); |
1817 | } |
1818 | |
1819 | if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE |
1820 | && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary |
1821 | && TREE_CONSTANT (unpadded_size)) |
1822 | { |
1823 | tree unpacked_size; |
1824 | |
1825 | #ifdef ROUND_TYPE_ALIGN |
1826 | rli->unpacked_align |
1827 | = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); |
1828 | #else |
1829 | rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); |
1830 | #endif |
1831 | |
1832 | unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align); |
1833 | if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) |
1834 | { |
1835 | if (TYPE_NAME (rli->t)) |
1836 | { |
1837 | tree name; |
1838 | |
1839 | if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) |
1840 | name = TYPE_NAME (rli->t); |
1841 | else |
1842 | name = DECL_NAME (TYPE_NAME (rli->t)); |
1843 | |
1844 | if (STRICT_ALIGNMENT) |
1845 | warning (OPT_Wpacked, "packed attribute causes inefficient " |
1846 | "alignment for %qE" , name); |
1847 | else |
1848 | warning (OPT_Wpacked, |
1849 | "packed attribute is unnecessary for %qE" , name); |
1850 | } |
1851 | else |
1852 | { |
1853 | if (STRICT_ALIGNMENT) |
1854 | warning (OPT_Wpacked, |
1855 | "packed attribute causes inefficient alignment" ); |
1856 | else |
1857 | warning (OPT_Wpacked, "packed attribute is unnecessary" ); |
1858 | } |
1859 | } |
1860 | } |
1861 | } |
1862 | |
1863 | /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ |
1864 | |
1865 | void |
1866 | compute_record_mode (tree type) |
1867 | { |
1868 | tree field; |
1869 | machine_mode mode = VOIDmode; |
1870 | |
1871 | /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. |
1872 | However, if possible, we use a mode that fits in a register |
1873 | instead, in order to allow for better optimization down the |
1874 | line. */ |
1875 | SET_TYPE_MODE (type, BLKmode); |
1876 | |
1877 | poly_uint64 type_size; |
if (!poly_int_tree_p (TYPE_SIZE (type), &type_size))
1879 | return; |
1880 | |
1881 | /* A record which has any BLKmode members must itself be |
1882 | BLKmode; it can't go in a register. Unless the member is |
1883 | BLKmode only because it isn't aligned. */ |
1884 | for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
1885 | { |
1886 | if (TREE_CODE (field) != FIELD_DECL) |
1887 | continue; |
1888 | |
1889 | poly_uint64 field_size; |
1890 | if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK |
1891 | || (TYPE_MODE (TREE_TYPE (field)) == BLKmode |
1892 | && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) |
1893 | && !(TYPE_SIZE (TREE_TYPE (field)) != 0 |
1894 | && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) |
1895 | || !tree_fits_poly_uint64_p (bit_position (field)) |
1896 | || DECL_SIZE (field) == 0 |
|| !poly_int_tree_p (DECL_SIZE (field), &field_size))
1898 | return; |
1899 | |
1900 | /* If this field is the whole struct, remember its mode so |
1901 | that, say, we can put a double in a class into a DF |
1902 | register instead of forcing it to live in the stack. */ |
1903 | if (known_eq (field_size, type_size) |
1904 | /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to |
1905 | wider types (e.g. int32), despite precision being less. Ensure |
1906 | that the TYPE_MODE of the struct does not get set to the partial |
1907 | int mode if there is a wider type also in the struct. */ |
1908 | && known_gt (GET_MODE_PRECISION (DECL_MODE (field)), |
1909 | GET_MODE_PRECISION (mode))) |
1910 | mode = DECL_MODE (field); |
1911 | |
1912 | /* With some targets, it is sub-optimal to access an aligned |
1913 | BLKmode structure as a scalar. */ |
1914 | if (targetm.member_type_forces_blk (field, mode)) |
1915 | return; |
1916 | } |
1917 | |
/* If we only have one real field, use its mode if that mode's size
1919 | matches the type's size. This generally only applies to RECORD_TYPE. |
1920 | For UNION_TYPE, if the widest field is MODE_INT then use that mode. |
1921 | If the widest field is MODE_PARTIAL_INT, and the union will be passed |
1922 | by reference, then use that mode. */ |
1923 | if ((TREE_CODE (type) == RECORD_TYPE |
1924 | || (TREE_CODE (type) == UNION_TYPE |
1925 | && (GET_MODE_CLASS (mode) == MODE_INT |
1926 | || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT |
1927 | && (targetm.calls.pass_by_reference |
(pack_cumulative_args (0),
1929 | function_arg_info (type, mode, /*named=*/false))))))) |
1930 | && mode != VOIDmode |
1931 | && known_eq (GET_MODE_BITSIZE (mode), type_size)) |
1932 | ; |
1933 | else |
mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
1935 | |
1936 | /* If structure's known alignment is less than what the scalar |
1937 | mode would need, and it matters, then stick with BLKmode. */ |
1938 | if (mode != BLKmode |
1939 | && STRICT_ALIGNMENT |
1940 | && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT |
1941 | || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode))) |
1942 | { |
1943 | /* If this is the only reason this type is BLKmode, then |
1944 | don't force containing types to be BLKmode. */ |
1945 | TYPE_NO_FORCE_BLK (type) = 1; |
1946 | mode = BLKmode; |
1947 | } |
1948 | |
1949 | SET_TYPE_MODE (type, mode); |
1950 | } |
1951 | |
1952 | /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid |
1953 | out. */ |
1954 | |
1955 | static void |
1956 | finalize_type_size (tree type) |
1957 | { |
1958 | /* Normally, use the alignment corresponding to the mode chosen. |
1959 | However, where strict alignment is not required, avoid |
1960 | over-aligning structures, since most compilers do not do this |
1961 | alignment. */ |
1962 | bool tua_cleared_p = false; |
1963 | if (TYPE_MODE (type) != BLKmode |
1964 | && TYPE_MODE (type) != VOIDmode |
1965 | && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type))) |
1966 | { |
1967 | unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type)); |
1968 | |
1969 | /* Don't override a larger alignment requirement coming from a user |
1970 | alignment of one of the fields. */ |
1971 | if (mode_align >= TYPE_ALIGN (type)) |
1972 | { |
1973 | SET_TYPE_ALIGN (type, mode_align); |
1974 | /* Remember that we're about to reset this flag. */ |
1975 | tua_cleared_p = TYPE_USER_ALIGN (type); |
1976 | TYPE_USER_ALIGN (type) = false; |
1977 | } |
1978 | } |
1979 | |
1980 | /* Do machine-dependent extra alignment. */ |
1981 | #ifdef ROUND_TYPE_ALIGN |
1982 | SET_TYPE_ALIGN (type, |
1983 | ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT)); |
1984 | #endif |
1985 | |
1986 | /* If we failed to find a simple way to calculate the unit size |
1987 | of the type, find it by division. */ |
1988 | if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0) |
1989 | /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the |
1990 | result will fit in sizetype. We will get more efficient code using |
1991 | sizetype, so we force a conversion. */ |
1992 | TYPE_SIZE_UNIT (type) |
1993 | = fold_convert (sizetype, |
1994 | size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), |
1995 | bitsize_unit_node)); |
1996 | |
1997 | if (TYPE_SIZE (type) != 0) |
1998 | { |
1999 | TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); |
2000 | TYPE_SIZE_UNIT (type) |
2001 | = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type)); |
2002 | } |
2003 | |
2004 | /* Evaluate nonconstant sizes only once, either now or as soon as safe. */ |
2005 | if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) |
2006 | TYPE_SIZE (type) = variable_size (TYPE_SIZE (type)); |
2007 | if (TYPE_SIZE_UNIT (type) != 0 |
2008 | && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) |
2009 | TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type)); |
2010 | |
2011 | /* Handle empty records as per the x86-64 psABI. */ |
2012 | TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type); |
2013 | |
2014 | /* Also layout any other variants of the type. */ |
2015 | if (TYPE_NEXT_VARIANT (type) |
2016 | || type != TYPE_MAIN_VARIANT (type)) |
2017 | { |
2018 | tree variant; |
2019 | /* Record layout info of this variant. */ |
2020 | tree size = TYPE_SIZE (type); |
2021 | tree size_unit = TYPE_SIZE_UNIT (type); |
2022 | unsigned int align = TYPE_ALIGN (type); |
2023 | unsigned int precision = TYPE_PRECISION (type); |
2024 | unsigned int user_align = TYPE_USER_ALIGN (type); |
2025 | machine_mode mode = TYPE_MODE (type); |
2026 | bool empty_p = TYPE_EMPTY_P (type); |
2027 | bool typeless = AGGREGATE_TYPE_P (type) && TYPE_TYPELESS_STORAGE (type); |
2028 | |
2029 | /* Copy it into all variants. */ |
2030 | for (variant = TYPE_MAIN_VARIANT (type); |
2031 | variant != NULL_TREE; |
2032 | variant = TYPE_NEXT_VARIANT (variant)) |
2033 | { |
2034 | TYPE_SIZE (variant) = size; |
2035 | TYPE_SIZE_UNIT (variant) = size_unit; |
2036 | unsigned valign = align; |
2037 | if (TYPE_USER_ALIGN (variant)) |
2038 | { |
2039 | valign = MAX (valign, TYPE_ALIGN (variant)); |
2040 | /* If we reset TYPE_USER_ALIGN on the main variant, we might |
2041 | need to reset it on the variants too. TYPE_MODE will be set |
2042 | to MODE in this variant, so we can use that. */ |
2043 | if (tua_cleared_p && GET_MODE_ALIGNMENT (mode) >= valign) |
2044 | TYPE_USER_ALIGN (variant) = false; |
2045 | } |
2046 | else |
2047 | TYPE_USER_ALIGN (variant) = user_align; |
2048 | SET_TYPE_ALIGN (variant, valign); |
2049 | TYPE_PRECISION (variant) = precision; |
2050 | SET_TYPE_MODE (variant, mode); |
2051 | TYPE_EMPTY_P (variant) = empty_p; |
2052 | if (AGGREGATE_TYPE_P (variant)) |
2053 | TYPE_TYPELESS_STORAGE (variant) = typeless; |
2054 | } |
2055 | } |
2056 | } |
2057 | |
2058 | /* Return a new underlying object for a bitfield started with FIELD. */ |
2059 | |
2060 | static tree |
2061 | start_bitfield_representative (tree field) |
2062 | { |
2063 | tree repr = make_node (FIELD_DECL); |
2064 | DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field); |
2065 | /* Force the representative to begin at a BITS_PER_UNIT aligned |
2066 | boundary - C++ may use tail-padding of a base object to |
2067 | continue packing bits so the bitfield region does not start |
2068 | at bit zero (see g++.dg/abi/bitfield5.C for example). |
2069 | Unallocated bits may happen for other reasons as well, |
2070 | for example Ada which allows explicit bit-granular structure layout. */ |
2071 | DECL_FIELD_BIT_OFFSET (repr) |
2072 | = size_binop (BIT_AND_EXPR, |
2073 | DECL_FIELD_BIT_OFFSET (field), |
2074 | bitsize_int (~(BITS_PER_UNIT - 1))); |
2075 | SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field)); |
2076 | DECL_SIZE (repr) = DECL_SIZE (field); |
2077 | DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field); |
2078 | DECL_PACKED (repr) = DECL_PACKED (field); |
2079 | DECL_CONTEXT (repr) = DECL_CONTEXT (field); |
2080 | /* There are no indirect accesses to this field. If we introduce |
2081 | some then they have to use the record alias set. This makes |
2082 | sure to properly conflict with [indirect] accesses to addressable |
2083 | fields of the bitfield group. */ |
2084 | DECL_NONADDRESSABLE_P (repr) = 1; |
2085 | return repr; |
2086 | } |
2087 | |
2088 | /* Finish up a bitfield group that was started by creating the underlying |
2089 | object REPR with the last field in the bitfield group FIELD. */ |
2090 | |
2091 | static void |
2092 | finish_bitfield_representative (tree repr, tree field) |
2093 | { |
2094 | unsigned HOST_WIDE_INT bitsize, maxbitsize; |
2095 | tree nextf, size; |
2096 | |
2097 | size = size_diffop (DECL_FIELD_OFFSET (field), |
2098 | DECL_FIELD_OFFSET (repr)); |
2099 | while (TREE_CODE (size) == COMPOUND_EXPR) |
2100 | size = TREE_OPERAND (size, 1); |
2101 | gcc_assert (tree_fits_uhwi_p (size)); |
2102 | bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT |
2103 | + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) |
2104 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) |
2105 | + tree_to_uhwi (DECL_SIZE (field))); |
2106 | |
2107 | /* Round up bitsize to multiples of BITS_PER_UNIT. */ |
2108 | bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); |
2109 | |
2110 | /* Now nothing tells us how to pad out bitsize ... */ |
2111 | if (TREE_CODE (DECL_CONTEXT (field)) == RECORD_TYPE) |
2112 | { |
2113 | nextf = DECL_CHAIN (field); |
2114 | while (nextf && TREE_CODE (nextf) != FIELD_DECL) |
2115 | nextf = DECL_CHAIN (nextf); |
2116 | } |
2117 | else |
2118 | nextf = NULL_TREE; |
2119 | if (nextf) |
2120 | { |
2121 | tree maxsize; |
/* If there was an error, the field may not be laid out
correctly.  Don't bother to do anything.  */
2124 | if (TREE_TYPE (nextf) == error_mark_node) |
2125 | { |
2126 | TREE_TYPE (repr) = error_mark_node; |
2127 | return; |
2128 | } |
2129 | maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), |
2130 | DECL_FIELD_OFFSET (repr)); |
2131 | if (tree_fits_uhwi_p (maxsize)) |
2132 | { |
2133 | maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT |
2134 | + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) |
2135 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); |
/* If the group ends within a bitfield, nextf does not need to be
aligned to BITS_PER_UNIT.  Thus round up.  */
2138 | maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); |
2139 | } |
2140 | else |
2141 | maxbitsize = bitsize; |
2142 | } |
2143 | else |
2144 | { |
2145 | /* Note that if the C++ FE sets up tail-padding to be re-used it |
creates an as-base variant of the type with TYPE_SIZE adjusted
2147 | accordingly. So it is safe to include tail-padding here. */ |
2148 | tree aggsize = lang_hooks.types.unit_size_without_reusable_padding |
2149 | (DECL_CONTEXT (field)); |
2150 | tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr)); |
2151 | /* We cannot generally rely on maxsize to fold to an integer constant, |
2152 | so use bitsize as fallback for this case. */ |
2153 | if (tree_fits_uhwi_p (maxsize)) |
2154 | maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT |
2155 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); |
2156 | else |
2157 | maxbitsize = bitsize; |
2158 | } |
2159 | |
2160 | /* Only if we don't artificially break up the representative in |
2161 | the middle of a large bitfield with different possibly |
2162 | overlapping representatives. And all representatives start |
2163 | at byte offset. */ |
2164 | gcc_assert (maxbitsize % BITS_PER_UNIT == 0); |
2165 | |
2166 | /* Find the smallest nice mode to use. */ |
2167 | opt_scalar_int_mode mode_iter; |
2168 | FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) |
if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
2170 | break; |
2171 | |
2172 | scalar_int_mode mode; |
if (!mode_iter.exists (&mode)
2174 | || GET_MODE_BITSIZE (mode) > maxbitsize |
2175 | || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE) |
2176 | { |
2177 | if (TREE_CODE (TREE_TYPE (field)) == BITINT_TYPE) |
2178 | { |
2179 | struct bitint_info info; |
2180 | unsigned prec = TYPE_PRECISION (TREE_TYPE (field)); |
2181 | bool ok = targetm.c.bitint_type_info (prec, &info); |
2182 | gcc_assert (ok); |
2183 | scalar_int_mode limb_mode |
= as_a <scalar_int_mode> (info.abi_limb_mode);
unsigned lprec = GET_MODE_PRECISION (limb_mode);
2186 | if (prec > lprec) |
2187 | { |
2188 | /* For middle/large/huge _BitInt prefer bitsize being a multiple |
2189 | of limb precision. */ |
2190 | unsigned HOST_WIDE_INT bsz = CEIL (bitsize, lprec) * lprec; |
2191 | if (bsz <= maxbitsize) |
2192 | bitsize = bsz; |
2193 | } |
2194 | } |
2195 | /* We really want a BLKmode representative only as a last resort, |
2196 | considering the member b in |
2197 | struct { int a : 7; int b : 17; int c; } __attribute__((packed)); |
2198 | Otherwise we simply want to split the representative up |
2199 | allowing for overlaps within the bitfield region as required for |
2200 | struct { int a : 7; int b : 7; |
2201 | int c : 10; int d; } __attribute__((packed)); |
2202 | [0, 15] HImode for a and b, [8, 23] HImode for c. */ |
2203 | DECL_SIZE (repr) = bitsize_int (bitsize); |
2204 | DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT); |
2205 | SET_DECL_MODE (repr, BLKmode); |
2206 | TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node, |
2207 | bitsize / BITS_PER_UNIT); |
2208 | } |
2209 | else |
2210 | { |
2211 | unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode); |
2212 | DECL_SIZE (repr) = bitsize_int (modesize); |
2213 | DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT); |
2214 | SET_DECL_MODE (repr, mode); |
2215 | TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1); |
2216 | } |
2217 | |
2218 | /* Remember whether the bitfield group is at the end of the |
2219 | structure or not. */ |
2220 | DECL_CHAIN (repr) = nextf; |
2221 | } |
2222 | |
2223 | /* Compute and set FIELD_DECLs for the underlying objects we should |
2224 | use for bitfield access for the structure T. */ |
2225 | |
2226 | void |
2227 | finish_bitfield_layout (tree t) |
2228 | { |
2229 | tree field, prev; |
2230 | tree repr = NULL_TREE; |
2231 | |
2232 | if (TREE_CODE (t) == QUAL_UNION_TYPE) |
2233 | return; |
2234 | |
2235 | for (prev = NULL_TREE, field = TYPE_FIELDS (t); |
2236 | field; field = DECL_CHAIN (field)) |
2237 | { |
2238 | if (TREE_CODE (field) != FIELD_DECL) |
2239 | continue; |
2240 | |
2241 | /* In the C++ memory model, consecutive bit fields in a structure are |
2242 | considered one memory location and updating a memory location |
2243 | may not store into adjacent memory locations. */ |
2244 | if (!repr |
2245 | && DECL_BIT_FIELD_TYPE (field)) |
2246 | { |
2247 | /* Start new representative. */ |
2248 | repr = start_bitfield_representative (field); |
2249 | } |
2250 | else if (repr |
2251 | && ! DECL_BIT_FIELD_TYPE (field)) |
2252 | { |
2253 | /* Finish off new representative. */ |
finish_bitfield_representative (repr, prev);
2255 | repr = NULL_TREE; |
2256 | } |
2257 | else if (DECL_BIT_FIELD_TYPE (field)) |
2258 | { |
2259 | gcc_assert (repr != NULL_TREE); |
2260 | |
2261 | /* Zero-size bitfields finish off a representative and |
2262 | do not have a representative themselves. This is |
2263 | required by the C++ memory model. */ |
2264 | if (integer_zerop (DECL_SIZE (field))) |
2265 | { |
finish_bitfield_representative (repr, prev);
2267 | repr = NULL_TREE; |
2268 | } |
2269 | |
2270 | /* We assume that either DECL_FIELD_OFFSET of the representative |
2271 | and each bitfield member is a constant or they are equal. |
2272 | This is because we need to be able to compute the bit-offset |
2273 | of each field relative to the representative in get_bit_range |
2274 | during RTL expansion. |
2275 | If these constraints are not met, simply force a new |
2276 | representative to be generated. That will at most |
2277 | generate worse code but still maintain correctness with |
2278 | respect to the C++ memory model. */ |
2279 | else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) |
2280 | && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) |
2281 | || operand_equal_p (DECL_FIELD_OFFSET (repr), |
DECL_FIELD_OFFSET (field), 0)))
2283 | { |
finish_bitfield_representative (repr, prev);
2285 | repr = start_bitfield_representative (field); |
2286 | } |
2287 | } |
2288 | else |
2289 | continue; |
2290 | |
2291 | if (repr) |
2292 | DECL_BIT_FIELD_REPRESENTATIVE (field) = repr; |
2293 | |
2294 | if (TREE_CODE (t) == RECORD_TYPE) |
2295 | prev = field; |
2296 | else if (repr) |
2297 | { |
2298 | finish_bitfield_representative (repr, field); |
2299 | repr = NULL_TREE; |
2300 | } |
2301 | } |
2302 | |
2303 | if (repr) |
finish_bitfield_representative (repr, prev);
2305 | } |
2306 | |
2307 | /* Do all of the work required to layout the type indicated by RLI, |
2308 | once the fields have been laid out. This function will call `free' |
2309 | for RLI, unless FREE_P is false. Passing a value other than false |
2310 | for FREE_P is bad practice; this option only exists to support the |
2311 | G++ 3.2 ABI. */ |
2312 | |
2313 | void |
2314 | finish_record_layout (record_layout_info rli, int free_p) |
2315 | { |
2316 | tree variant; |
2317 | |
2318 | /* Compute the final size. */ |
2319 | finalize_record_size (rli); |
2320 | |
2321 | /* Compute the TYPE_MODE for the record. */ |
compute_record_mode (rli->t);
2323 | |
2324 | /* Perform any last tweaks to the TYPE_SIZE, etc. */ |
finalize_type_size (rli->t);
2326 | |
2327 | /* Compute bitfield representatives. */ |
finish_bitfield_layout (rli->t);
2329 | |
2330 | /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants. |
2331 | With C++ templates, it is too early to do this when the attribute |
2332 | is being parsed. */ |
2333 | for (variant = TYPE_NEXT_VARIANT (rli->t); variant; |
2334 | variant = TYPE_NEXT_VARIANT (variant)) |
2335 | { |
2336 | TYPE_PACKED (variant) = TYPE_PACKED (rli->t); |
2337 | TYPE_REVERSE_STORAGE_ORDER (variant) |
2338 | = TYPE_REVERSE_STORAGE_ORDER (rli->t); |
2339 | } |
2340 | |
2341 | /* Lay out any static members. This is done now because their type |
2342 | may use the record's type. */ |
while (!vec_safe_is_empty (rli->pending_statics))
layout_decl (rli->pending_statics->pop (), 0);
2345 | |
2346 | /* Clean up. */ |
2347 | if (free_p) |
2348 | { |
2349 | vec_free (v&: rli->pending_statics); |
2350 | free (ptr: rli); |
2351 | } |
2352 | } |
2353 | |
2354 | |
/* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
2356 | NAME, its fields are chained in reverse on FIELDS. |
2357 | |
2358 | If ALIGN_TYPE is non-null, it is given the same alignment as |
2359 | ALIGN_TYPE. */ |
2360 | |
2361 | void |
2362 | finish_builtin_struct (tree type, const char *name, tree fields, |
2363 | tree align_type) |
2364 | { |
2365 | tree tail, next; |
2366 | |
2367 | for (tail = NULL_TREE; fields; tail = fields, fields = next) |
2368 | { |
2369 | DECL_FIELD_CONTEXT (fields) = type; |
2370 | next = DECL_CHAIN (fields); |
2371 | DECL_CHAIN (fields) = tail; |
2372 | } |
2373 | TYPE_FIELDS (type) = tail; |
2374 | |
2375 | if (align_type) |
2376 | { |
2377 | SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type)); |
2378 | TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type); |
2379 | SET_TYPE_WARN_IF_NOT_ALIGN (type, |
2380 | TYPE_WARN_IF_NOT_ALIGN (align_type)); |
2381 | } |
2382 | |
2383 | layout_type (type); |
2384 | #if 0 /* not yet, should get fixed properly later */ |
2385 | TYPE_NAME (type) = make_type_decl (get_identifier (name), type); |
2386 | #else |
2387 | TYPE_NAME (type) = build_decl (BUILTINS_LOCATION, |
2388 | TYPE_DECL, get_identifier (name), type); |
2389 | #endif |
2390 | TYPE_STUB_DECL (type) = TYPE_NAME (type); |
layout_decl (TYPE_NAME (type), 0);
2392 | } |
2393 | |
2394 | /* Calculate the mode, size, and alignment for TYPE. |
2395 | For an array type, calculate the element separation as well. |
2396 | Record TYPE on the chain of permanent or temporary types |
2397 | so that dbxout will find out about it. |
2398 | |
2399 | TYPE_SIZE of a type is nonzero if the type has been laid out already. |
2400 | layout_type does nothing on such a type. |
2401 | |
2402 | If the type is incomplete, its TYPE_SIZE remains zero. */ |
2403 | |
2404 | void |
2405 | layout_type (tree type) |
2406 | { |
2407 | gcc_assert (type); |
2408 | |
2409 | if (type == error_mark_node) |
2410 | return; |
2411 | |
2412 | /* We don't want finalize_type_size to copy an alignment attribute to |
2413 | variants that don't have it. */ |
2414 | type = TYPE_MAIN_VARIANT (type); |
2415 | |
2416 | /* Do nothing if type has been laid out before. */ |
2417 | if (TYPE_SIZE (type)) |
2418 | return; |
2419 | |
2420 | switch (TREE_CODE (type)) |
2421 | { |
2422 | case LANG_TYPE: |
2423 | /* This kind of type is the responsibility |
2424 | of the language-specific code. */ |
2425 | gcc_unreachable (); |
2426 | |
2427 | case BOOLEAN_TYPE: |
2428 | case INTEGER_TYPE: |
2429 | case ENUMERAL_TYPE: |
2430 | { |
2431 | scalar_int_mode mode |
2432 | = smallest_int_mode_for_size (TYPE_PRECISION (type)); |
2433 | SET_TYPE_MODE (type, mode); |
2434 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
2435 | /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */ |
2436 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2437 | break; |
2438 | } |
2439 | |
2440 | case BITINT_TYPE: |
2441 | { |
2442 | struct bitint_info info; |
2443 | int cnt; |
2444 | bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type), &info); |
2445 | gcc_assert (ok); |
2446 | scalar_int_mode limb_mode |
= as_a <scalar_int_mode> (info.abi_limb_mode);
if (TYPE_PRECISION (type) <= GET_MODE_PRECISION (limb_mode))
2449 | { |
2450 | SET_TYPE_MODE (type, limb_mode); |
2451 | gcc_assert (info.abi_limb_mode == info.limb_mode); |
2452 | cnt = 1; |
2453 | } |
2454 | else |
2455 | { |
2456 | SET_TYPE_MODE (type, BLKmode); |
2457 | cnt = CEIL (TYPE_PRECISION (type), GET_MODE_PRECISION (limb_mode)); |
2458 | gcc_assert (info.abi_limb_mode == info.limb_mode |
2459 | || !info.big_endian == !WORDS_BIG_ENDIAN); |
2460 | } |
2461 | TYPE_SIZE (type) = bitsize_int (cnt * GET_MODE_BITSIZE (limb_mode)); |
2462 | TYPE_SIZE_UNIT (type) = size_int (cnt * GET_MODE_SIZE (limb_mode)); |
2463 | SET_TYPE_ALIGN (type, GET_MODE_ALIGNMENT (limb_mode)); |
2464 | if (cnt > 1) |
2465 | { |
2466 | /* Use same mode as compute_record_mode would use for a structure |
2467 | containing cnt limb_mode elements. */ |
2468 | machine_mode mode = mode_for_size_tree (TYPE_SIZE (type), |
MODE_INT, 1).else_blk ();
2470 | if (mode == BLKmode) |
2471 | break; |
2472 | finalize_type_size (type); |
2473 | SET_TYPE_MODE (type, mode); |
2474 | if (STRICT_ALIGNMENT |
2475 | && !(TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT |
2476 | || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode))) |
2477 | { |
2478 | /* If this is the only reason this type is BLKmode, then |
2479 | don't force containing types to be BLKmode. */ |
2480 | TYPE_NO_FORCE_BLK (type) = 1; |
2481 | SET_TYPE_MODE (type, BLKmode); |
2482 | } |
2483 | if (TYPE_NEXT_VARIANT (type) || type != TYPE_MAIN_VARIANT (type)) |
2484 | for (tree variant = TYPE_MAIN_VARIANT (type); |
2485 | variant != NULL_TREE; |
2486 | variant = TYPE_NEXT_VARIANT (variant)) |
2487 | { |
2488 | SET_TYPE_MODE (variant, mode); |
2489 | if (STRICT_ALIGNMENT |
2490 | && !(TYPE_ALIGN (variant) >= BIGGEST_ALIGNMENT |
2491 | || (TYPE_ALIGN (variant) |
2492 | >= GET_MODE_ALIGNMENT (mode)))) |
2493 | { |
2494 | TYPE_NO_FORCE_BLK (variant) = 1; |
2495 | SET_TYPE_MODE (variant, BLKmode); |
2496 | } |
2497 | } |
2498 | return; |
2499 | } |
2500 | break; |
2501 | } |
2502 | |
2503 | case REAL_TYPE: |
2504 | { |
2505 | /* Allow the caller to choose the type mode, which is how decimal |
2506 | floats are distinguished from binary ones. */ |
2507 | if (TYPE_MODE (type) == VOIDmode) |
2508 | SET_TYPE_MODE |
2509 | (type, float_mode_for_size (TYPE_PRECISION (type)).require ()); |
2510 | scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type)); |
2511 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
2512 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2513 | break; |
2514 | } |
2515 | |
2516 | case FIXED_POINT_TYPE: |
2517 | { |
2518 | /* TYPE_MODE (type) has been set already. */ |
2519 | scalar_mode mode = SCALAR_TYPE_MODE (type); |
2520 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
2521 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2522 | break; |
2523 | } |
2524 | |
2525 | case COMPLEX_TYPE: |
2526 | TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); |
2527 | if (TYPE_MODE (TREE_TYPE (type)) == BLKmode) |
2528 | { |
2529 | gcc_checking_assert (TREE_CODE (TREE_TYPE (type)) == BITINT_TYPE); |
2530 | SET_TYPE_MODE (type, BLKmode); |
2531 | TYPE_SIZE (type) |
2532 | = int_const_binop (MULT_EXPR, TYPE_SIZE (TREE_TYPE (type)), |
2533 | bitsize_int (2)); |
2534 | TYPE_SIZE_UNIT (type) |
2535 | = int_const_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (type)), |
2536 | bitsize_int (2)); |
2537 | break; |
2538 | } |
2539 | SET_TYPE_MODE (type, |
2540 | GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type)))); |
2541 | |
2542 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
2543 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); |
2544 | break; |
2545 | |
2546 | case VECTOR_TYPE: |
2547 | { |
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
2549 | tree innertype = TREE_TYPE (type); |
2550 | |
2551 | /* Find an appropriate mode for the vector type. */ |
2552 | if (TYPE_MODE (type) == VOIDmode) |
2553 | SET_TYPE_MODE (type, |
2554 | mode_for_vector (SCALAR_TYPE_MODE (innertype), |
2555 | nunits).else_blk ()); |
2556 | |
2557 | TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type)); |
2558 | TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); |
2559 | /* Several boolean vector elements may fit in a single unit. */ |
2560 | if (VECTOR_BOOLEAN_TYPE_P (type) |
2561 | && type->type_common.mode != BLKmode) |
2562 | TYPE_SIZE_UNIT (type) |
2563 | = size_int (GET_MODE_SIZE (type->type_common.mode)); |
2564 | else |
2565 | TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR, |
2566 | TYPE_SIZE_UNIT (innertype), |
2567 | size_int (nunits)); |
2568 | TYPE_SIZE (type) = int_const_binop |
2569 | (MULT_EXPR, |
2570 | bits_from_bytes (TYPE_SIZE_UNIT (type)), |
2571 | bitsize_int (BITS_PER_UNIT)); |
2572 | |
2573 | /* For vector types, we do not default to the mode's alignment. |
2574 | Instead, query a target hook, defaulting to natural alignment. |
2575 | This prevents ABI changes depending on whether or not native |
2576 | vector modes are supported. */ |
2577 | SET_TYPE_ALIGN (type, targetm.vector_alignment (type)); |
2578 | |
2579 | /* However, if the underlying mode requires a bigger alignment than |
2580 | what the target hook provides, we cannot use the mode. For now, |
2581 | simply reject that case. */ |
2582 | gcc_assert (TYPE_ALIGN (type) |
2583 | >= GET_MODE_ALIGNMENT (TYPE_MODE (type))); |
2584 | break; |
2585 | } |
2586 | |
2587 | case VOID_TYPE: |
2588 | /* This is an incomplete type and so doesn't have a size. */ |
2589 | SET_TYPE_ALIGN (type, 1); |
2590 | TYPE_USER_ALIGN (type) = 0; |
2591 | SET_TYPE_MODE (type, VOIDmode); |
2592 | break; |
2593 | |
2594 | case OFFSET_TYPE: |
2595 | TYPE_SIZE (type) = bitsize_int (POINTER_SIZE); |
2596 | TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS); |
2597 | /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be |
2598 | integral, which may be an __intN. */ |
2599 | SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ()); |
2600 | TYPE_PRECISION (type) = POINTER_SIZE; |
2601 | break; |
2602 | |
2603 | case FUNCTION_TYPE: |
2604 | case METHOD_TYPE: |
2605 | /* It's hard to see what the mode and size of a function ought to |
2606 | be, but we do know the alignment is FUNCTION_BOUNDARY, so |
2607 | make it consistent with that. */ |
2608 | SET_TYPE_MODE (type, |
2609 | int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ()); |
2610 | TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY); |
2611 | TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); |
2612 | break; |
2613 | |
2614 | case POINTER_TYPE: |
2615 | case REFERENCE_TYPE: |
2616 | { |
2617 | scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type); |
2618 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
2619 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2620 | TYPE_UNSIGNED (type) = 1; |
2621 | TYPE_PRECISION (type) = GET_MODE_PRECISION (mode); |
2622 | } |
2623 | break; |
2624 | |
2625 | case ARRAY_TYPE: |
2626 | { |
2627 | tree index = TYPE_DOMAIN (type); |
2628 | tree element = TREE_TYPE (type); |
2629 | |
2630 | /* We need to know both bounds in order to compute the size. */ |
2631 | if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index) |
2632 | && TYPE_SIZE (element)) |
2633 | { |
2634 | tree ub = TYPE_MAX_VALUE (index); |
2635 | tree lb = TYPE_MIN_VALUE (index); |
2636 | tree element_size = TYPE_SIZE (element); |
2637 | tree length; |
2638 | |
2639 | /* Make sure that an array of zero-sized element is zero-sized |
2640 | regardless of its extent. */ |
2641 | if (integer_zerop (element_size)) |
2642 | length = size_zero_node; |
2643 | |
2644 | /* The computation should happen in the original signedness so |
2645 | that (possible) negative values are handled appropriately |
2646 | when determining overflow. */ |
2647 | else |
2648 | { |
2649 | /* ??? When it is obvious that the range is signed |
2650 | represent it using ssizetype. */ |
2651 | if (TREE_CODE (lb) == INTEGER_CST |
2652 | && TREE_CODE (ub) == INTEGER_CST |
2653 | && TYPE_UNSIGNED (TREE_TYPE (lb)) |
2654 | && tree_int_cst_lt (t1: ub, t2: lb)) |
2655 | { |
lb = wide_int_to_tree (ssizetype,
offset_int::from (wi::to_wide (lb),
SIGNED));
ub = wide_int_to_tree (ssizetype,
offset_int::from (wi::to_wide (ub),
SIGNED));
2662 | } |
2663 | length |
2664 | = fold_convert (sizetype, |
2665 | size_binop (PLUS_EXPR, |
2666 | build_int_cst (TREE_TYPE (lb), 1), |
2667 | size_binop (MINUS_EXPR, ub, lb))); |
2668 | } |
2669 | |
2670 | /* ??? We have no way to distinguish a null-sized array from an |
2671 | array spanning the whole sizetype range, so we arbitrarily |
2672 | decide that [0, -1] is the only valid representation. */ |
2673 | if (integer_zerop (length) |
2674 | && TREE_OVERFLOW (length) |
2675 | && integer_zerop (lb)) |
2676 | length = size_zero_node; |
2677 | |
2678 | TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, |
2679 | bits_from_bytes (length)); |
2680 | |
2681 | /* If we know the size of the element, calculate the total size |
2682 | directly, rather than do some division thing below. This |
2683 | optimization helps Fortran assumed-size arrays (where the |
2684 | size of the array is determined at runtime) substantially. */ |
2685 | if (TYPE_SIZE_UNIT (element)) |
2686 | TYPE_SIZE_UNIT (type) |
2687 | = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length); |
2688 | } |
2689 | |
2690 | /* Now round the alignment and size, |
2691 | using machine-dependent criteria if any. */ |
2692 | |
2693 | unsigned align = TYPE_ALIGN (element); |
2694 | if (TYPE_USER_ALIGN (type)) |
2695 | align = MAX (align, TYPE_ALIGN (type)); |
2696 | else |
2697 | TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element); |
2698 | if (!TYPE_WARN_IF_NOT_ALIGN (type)) |
2699 | SET_TYPE_WARN_IF_NOT_ALIGN (type, |
2700 | TYPE_WARN_IF_NOT_ALIGN (element)); |
2701 | #ifdef ROUND_TYPE_ALIGN |
2702 | align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT); |
2703 | #else |
2704 | align = MAX (align, BITS_PER_UNIT); |
2705 | #endif |
2706 | SET_TYPE_ALIGN (type, align); |
2707 | SET_TYPE_MODE (type, BLKmode); |
2708 | if (TYPE_SIZE (type) != 0 |
2709 | && ! targetm.member_type_forces_blk (type, VOIDmode) |
2710 | /* BLKmode elements force BLKmode aggregate; |
2711 | else extract/store fields may lose. */ |
2712 | && (TYPE_MODE (TREE_TYPE (type)) != BLKmode |
2713 | || TYPE_NO_FORCE_BLK (TREE_TYPE (type)))) |
2714 | { |
2715 | SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type), |
2716 | TYPE_SIZE (type))); |
2717 | if (TYPE_MODE (type) != BLKmode |
2718 | && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT |
2719 | && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))) |
2720 | { |
2721 | TYPE_NO_FORCE_BLK (type) = 1; |
2722 | SET_TYPE_MODE (type, BLKmode); |
2723 | } |
2724 | } |
2725 | if (AGGREGATE_TYPE_P (element)) |
2726 | TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element); |
2727 | /* When the element size is constant, check that it is at least as |
2728 | large as the element alignment. */ |
2729 | if (TYPE_SIZE_UNIT (element) |
2730 | && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST |
2731 | /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than |
2732 | TYPE_ALIGN_UNIT. */ |
2733 | && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element)) |
2734 | && !integer_zerop (TYPE_SIZE_UNIT (element))) |
2735 | { |
2736 | if (compare_tree_int (TYPE_SIZE_UNIT (element), |
2737 | TYPE_ALIGN_UNIT (element)) < 0) |
2738 | error ("alignment of array elements is greater than " |
2739 | "element size" ); |
2740 | else if (TYPE_ALIGN_UNIT (element) > 1 |
2741 | && (wi::zext (x: wi::to_wide (TYPE_SIZE_UNIT (element)), |
2742 | offset: ffs_hwi (TYPE_ALIGN_UNIT (element)) - 1) |
2743 | != 0)) |
2744 | error ("size of array element is not a multiple of its " |
2745 | "alignment" ); |
2746 | } |
2747 | break; |
2748 | } |
2749 | |
2750 | case RECORD_TYPE: |
2751 | case UNION_TYPE: |
2752 | case QUAL_UNION_TYPE: |
2753 | { |
2754 | tree field; |
2755 | record_layout_info rli; |
2756 | |
2757 | /* Initialize the layout information. */ |
rli = start_record_layout (type);
2759 | |
2760 | /* If this is a QUAL_UNION_TYPE, we want to process the fields |
2761 | in the reverse order in building the COND_EXPR that denotes |
2762 | its size. We reverse them again later. */ |
2763 | if (TREE_CODE (type) == QUAL_UNION_TYPE) |
2764 | TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); |
2765 | |
2766 | /* Place all the fields. */ |
2767 | for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
2768 | place_field (rli, field); |
2769 | |
2770 | if (TREE_CODE (type) == QUAL_UNION_TYPE) |
2771 | TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); |
2772 | |
2773 | /* Finish laying out the record. */ |
2774 | finish_record_layout (rli, /*free_p=*/true); |
2775 | } |
2776 | break; |
2777 | |
2778 | default: |
2779 | gcc_unreachable (); |
2780 | } |
2781 | |
2782 | /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For |
2783 | records and unions, finish_record_layout already called this |
2784 | function. */ |
2785 | if (!RECORD_OR_UNION_TYPE_P (type)) |
2786 | finalize_type_size (type); |
2787 | |
/* We should never see alias sets on incomplete aggregates.  And we
should not call layout_type on aggregates that are already complete.  */
2790 | if (AGGREGATE_TYPE_P (type)) |
2791 | gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type)); |
2792 | } |
2793 | |
2794 | /* Return the least alignment required for type TYPE. */ |
2795 | |
2796 | unsigned int |
2797 | min_align_of_type (tree type) |
2798 | { |
2799 | unsigned int align = TYPE_ALIGN (type); |
2800 | if (!TYPE_USER_ALIGN (type)) |
2801 | { |
2802 | align = MIN (align, BIGGEST_ALIGNMENT); |
2803 | #ifdef BIGGEST_FIELD_ALIGNMENT |
2804 | align = MIN (align, BIGGEST_FIELD_ALIGNMENT); |
2805 | #endif |
2806 | unsigned int field_align = align; |
2807 | #ifdef ADJUST_FIELD_ALIGN |
2808 | field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align); |
2809 | #endif |
2810 | align = MIN (align, field_align); |
2811 | } |
2812 | return align / BITS_PER_UNIT; |
2813 | } |
2814 | |
2815 | /* Create and return a type for signed integers of PRECISION bits. */ |
2816 | |
2817 | tree |
2818 | make_signed_type (int precision) |
2819 | { |
2820 | tree type = make_node (INTEGER_TYPE); |
2821 | |
2822 | TYPE_PRECISION (type) = precision; |
2823 | |
2824 | fixup_signed_type (type); |
2825 | return type; |
2826 | } |
2827 | |
2828 | /* Create and return a type for unsigned integers of PRECISION bits. */ |
2829 | |
2830 | tree |
2831 | make_unsigned_type (int precision) |
2832 | { |
2833 | tree type = make_node (INTEGER_TYPE); |
2834 | |
2835 | TYPE_PRECISION (type) = precision; |
2836 | |
2837 | fixup_unsigned_type (type); |
2838 | return type; |
2839 | } |
2840 | |
2841 | /* Create and return a type for fract of PRECISION bits, UNSIGNEDP, |
2842 | and SATP. */ |
2843 | |
2844 | tree |
2845 | make_fract_type (int precision, int unsignedp, int satp) |
2846 | { |
2847 | tree type = make_node (FIXED_POINT_TYPE); |
2848 | |
2849 | TYPE_PRECISION (type) = precision; |
2850 | |
2851 | if (satp) |
2852 | TYPE_SATURATING (type) = 1; |
2853 | |
2854 | /* Lay out the type: set its alignment, size, etc. */ |
2855 | TYPE_UNSIGNED (type) = unsignedp; |
2856 | enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT; |
2857 | SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); |
2858 | layout_type (type); |
2859 | |
2860 | return type; |
2861 | } |
2862 | |
2863 | /* Create and return a type for accum of PRECISION bits, UNSIGNEDP, |
2864 | and SATP. */ |
2865 | |
2866 | tree |
2867 | make_accum_type (int precision, int unsignedp, int satp) |
2868 | { |
2869 | tree type = make_node (FIXED_POINT_TYPE); |
2870 | |
2871 | TYPE_PRECISION (type) = precision; |
2872 | |
2873 | if (satp) |
2874 | TYPE_SATURATING (type) = 1; |
2875 | |
2876 | /* Lay out the type: set its alignment, size, etc. */ |
2877 | TYPE_UNSIGNED (type) = unsignedp; |
2878 | enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM; |
2879 | SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); |
2880 | layout_type (type); |
2881 | |
2882 | return type; |
2883 | } |
2884 | |
2885 | /* Initialize sizetypes so layout_type can use them. */ |
2886 | |
2887 | void |
2888 | initialize_sizetypes (void) |
2889 | { |
2890 | int precision, bprecision; |
2891 | |
2892 | /* Get sizetypes precision from the SIZE_TYPE target macro. */ |
if (strcmp (SIZETYPE, "unsigned int") == 0)
precision = INT_TYPE_SIZE;
else if (strcmp (SIZETYPE, "long unsigned int") == 0)
precision = LONG_TYPE_SIZE;
else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
precision = LONG_LONG_TYPE_SIZE;
else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2900 | precision = SHORT_TYPE_SIZE; |
2901 | else |
2902 | { |
2903 | int i; |
2904 | |
2905 | precision = -1; |
2906 | for (i = 0; i < NUM_INT_N_ENTS; i++) |
2907 | if (int_n_enabled_p[i]) |
2908 | { |
2909 | char name[50], altname[50]; |
sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);

if (strcmp (name, SIZETYPE) == 0
|| strcmp (altname, SIZETYPE) == 0)
2915 | { |
2916 | precision = int_n_data[i].bitsize; |
2917 | } |
2918 | } |
2919 | if (precision == -1) |
2920 | gcc_unreachable (); |
2921 | } |

  bprecision
    = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
  bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
  if (bprecision > HOST_BITS_PER_DOUBLE_INT)
    bprecision = HOST_BITS_PER_DOUBLE_INT;

  /* Create stubs for sizetype and bitsizetype so we can create constants.  */
  sizetype = make_node (INTEGER_TYPE);
  TYPE_NAME (sizetype) = get_identifier ("sizetype");
  TYPE_PRECISION (sizetype) = precision;
  TYPE_UNSIGNED (sizetype) = 1;
  bitsizetype = make_node (INTEGER_TYPE);
  TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
  TYPE_PRECISION (bitsizetype) = bprecision;
  TYPE_UNSIGNED (bitsizetype) = 1;

  /* Now layout both types manually.  */
  scalar_int_mode mode = smallest_int_mode_for_size (precision);
  SET_TYPE_MODE (sizetype, mode);
  SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
  TYPE_SIZE (sizetype) = bitsize_int (precision);
  TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
  set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);

  mode = smallest_int_mode_for_size (bprecision);
  SET_TYPE_MODE (bitsizetype, mode);
  SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
  TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
  TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
  set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);

  /* Create the signed variants of *sizetype.  */
  ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
  TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
  sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
  TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
}
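
/* A worked example, assuming a typical LP64 target: SIZETYPE is
   "long unsigned int" and LONG_TYPE_SIZE is 64, so sizetype gets
   precision 64.  bitsizetype then needs at least
   64 + LOG2_BITS_PER_UNIT + 1 = 68 bits to count the bits in any
   byte-counted object; smallest_int_mode_for_size rounds that up to
   a 128-bit mode where MAX_FIXED_MODE_SIZE allows one, and the cap
   of HOST_BITS_PER_DOUBLE_INT (128 on a 64-bit host) leaves
   bitsizetype with 128-bit precision.  */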

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE for TYPE,
   based on PRECISION and the signedness SGN.  PRECISION need not
   correspond to a width supported natively by the hardware; for
   example, on a machine with 8-bit, 16-bit, and 32-bit register
   modes, PRECISION might be 7, 23, or 61.  */

void
set_min_and_max_values_for_integral_type (tree type,
					  int precision,
					  signop sgn)
{
  /* For bitfields with zero width we end up creating integer types
     with zero precision.  Don't assign any minimum/maximum values
     to those types, they don't have any valid value.  */
  if (precision < 1)
    return;

  gcc_assert (precision <= WIDE_INT_MAX_PRECISION);

  TYPE_MIN_VALUE (type)
    = wide_int_to_tree (type, wi::min_value (precision, sgn));
  TYPE_MAX_VALUE (type)
    = wide_int_to_tree (type, wi::max_value (precision, sgn));
}
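
/* For instance, a signed bit-field of width 7 yields a type whose
   bounds are computed as if by:

     TYPE_MIN_VALUE (t) = wide_int_to_tree (t, wi::min_value (7, SIGNED));
     TYPE_MAX_VALUE (t) = wide_int_to_tree (t, wi::max_value (7, SIGNED));

   i.e. -64 and 63, even though no 7-bit register mode exists.  */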

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  set_min_and_max_values_for_integral_type (type, precision, SIGNED);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}
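
/* As an illustrative sketch, a front end laying out an enumeration
   whose enumerators all fit in [0, 255] could give it an 8-bit
   precision:

     tree t = make_node (ENUMERAL_TYPE);
     TYPE_PRECISION (t) = 8;
     fixup_unsigned_type (t);

   which sets the bounds to 0 and 255 and lays the type out with a
   byte-sized mode on typical targets.  */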

/* Construct an iterator for a bitfield that spans BITSIZE bits,
   starting at BITPOS.

   BITREGION_START is the bit position of the first bit in this
   sequence of bit fields.  BITREGION_END is the last bit in this
   sequence.  If these two fields are non-zero, we should restrict the
   memory access to that range.  Otherwise, we are allowed to touch
   any adjacent non bit-fields.

   ALIGN is the alignment of the underlying object in bits.
   VOLATILEP says whether the bitfield is volatile.  */

bit_field_mode_iterator
::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
			   poly_int64 bitregion_start,
			   poly_int64 bitregion_end,
			   unsigned int align, bool volatilep)
: m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
  m_bitpos (bitpos), m_bitregion_start (bitregion_start),
  m_bitregion_end (bitregion_end), m_align (align),
  m_volatilep (volatilep), m_count (0)
{
  if (known_eq (m_bitregion_end, 0))
    {
      /* We can assume that any aligned chunk of ALIGN bits that overlaps
	 the bitfield is mapped and won't trap, provided that ALIGN isn't
	 too large.  The cap is the biggest required alignment for data,
	 or at least the word size.  And force one such chunk at least.  */
      unsigned HOST_WIDE_INT units
	= MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
      if (bitsize <= 0)
	bitsize = 1;
      HOST_WIDE_INT end = bitpos + bitsize + units - 1;
      m_bitregion_end = end - end % units - 1;
    }
}
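
/* A worked example of the default region: for a bitfield at BITPOS 37
   of BITSIZE 10 in an object with 32-bit alignment (and assuming
   BIGGEST_ALIGNMENT >= 32), UNITS is 32, END is 37 + 10 + 31 = 78,
   and m_bitregion_end becomes 78 - 78 % 32 - 1 = 63: the iterator
   may touch the whole aligned 32-bit chunks that overlap the field,
   bits 0 to 63, but nothing beyond them.  */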

/* Calls to this function return successively larger modes that can be used
   to represent the bitfield.  Return true if another bitfield mode is
   available, storing it in *OUT_MODE if so.  */

bool
bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
{
  scalar_int_mode mode;
  for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
    {
      unsigned int unit = GET_MODE_BITSIZE (mode);

      /* Skip modes that don't have full precision.  */
      if (unit != GET_MODE_PRECISION (mode))
	continue;

      /* Stop if the mode is too wide to handle efficiently.  */
      if (unit > MAX_FIXED_MODE_SIZE)
	break;

      /* Don't deliver more than one multiword mode; the smallest one
	 should be used.  */
      if (m_count > 0 && unit > BITS_PER_WORD)
	break;

      /* Skip modes that are too small.  */
      unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
      unsigned HOST_WIDE_INT subend = substart + m_bitsize;
      if (subend > unit)
	continue;

      /* Stop if the mode goes outside the bitregion.  */
      HOST_WIDE_INT start = m_bitpos - substart;
      if (maybe_ne (m_bitregion_start, 0)
	  && maybe_lt (start, m_bitregion_start))
	break;
      HOST_WIDE_INT end = start + unit;
      if (maybe_gt (end, m_bitregion_end + 1))
	break;

      /* Stop if the mode requires too much alignment.  */
      if (GET_MODE_ALIGNMENT (mode) > m_align
	  && targetm.slow_unaligned_access (mode, m_align))
	break;

      *out_mode = mode;
      m_mode = GET_MODE_WIDER_MODE (mode);
      m_count++;
      return true;
    }
  return false;
}
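
/* For example, for a non-volatile bitfield of BITSIZE 5 at BITPOS 3
   in a 32-bit-aligned object (illustrative, assuming no unusual
   target settings), successive calls yield QImode (bits 0..7 enclose
   bits 3..7), then HImode, then SImode; wider modes are rejected once
   they would read outside the bit region or exceed
   MAX_FIXED_MODE_SIZE.  */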

/* Return true if smaller modes are generally preferred for this kind
   of bitfield.  */

bool
bit_field_mode_iterator::prefer_smaller_modes ()
{
  return (m_volatilep
	  ? targetm.narrow_volatile_bitfield ()
	  : !SLOW_BYTE_ACCESS);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   BITREGION_START is the bit position of the first bit in this
   sequence of bit fields.  BITREGION_END is the last bit in this
   sequence.  If these two fields are non-zero, we should restrict the
   memory access to that range.  Otherwise, we are allowed to touch
   any adjacent non bit-fields.

   The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
   INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
   doesn't want to apply a specific limit.

   If no mode meets all these conditions, we return false; otherwise
   we store the chosen mode in *BEST_MODE and return true.

   The underlying object is known to be aligned to a boundary of ALIGN bits.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than a word) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
   decide which of the above modes should be used.  */
bool
get_best_mode (int bitsize, int bitpos,
	       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
	       unsigned int align,
	       unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
	       scalar_int_mode *best_mode)
{
  bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
				bitregion_end, align, volatilep);
  scalar_int_mode mode;
  bool found = false;
  while (iter.next_mode (&mode)
	 /* ??? For historical reasons, reject modes that would normally
	    receive greater alignment, even if unaligned accesses are
	    acceptable.  This has both advantages and disadvantages.
	    Removing this check means that something like:

	      struct s { unsigned int x; unsigned int y; };
	      int f (struct s *s) { return s->x == 0 && s->y == 0; }

	    can be implemented using a single load and compare on
	    64-bit machines that have no alignment restrictions.
	    For example, on powerpc64-linux-gnu, we would generate:

		    ld 3,0(3)
		    cntlzd 3,3
		    srdi 3,3,6
		    blr

	    rather than:

		    lwz 9,0(3)
		    cmpwi 7,9,0
		    bne 7,.L3
		    lwz 3,4(3)
		    cntlzw 3,3
		    srwi 3,3,5
		    extsw 3,3
		    blr
		    .p2align 4,,15
	    .L3:
		    li 3,0
		    blr

	    However, accessing more than one field can make life harder
	    for the gimple optimizers.  For example, gcc.dg/vect/bb-slp-5.c
	    has a series of unsigned short copies followed by a series of
	    unsigned short comparisons.  With this check, both the copies
	    and comparisons remain 16-bit accesses and FRE is able
	    to eliminate the latter.  Without the check, the comparisons
	    can be done using 2 64-bit operations, which FRE isn't able
	    to handle in the same way.

	    Either way, it would probably be worth disabling this check
	    during expand.  One particular example where removing the
	    check would help is the get_best_mode call in store_bit_field.
	    If we are given a memory bitregion of 128 bits that is aligned
	    to a 64-bit boundary, and the bitfield we want to modify is
	    in the second half of the bitregion, this check causes
	    store_bit_field to turn the memory into a 64-bit reference
	    to the _first_ half of the region.  We later use
	    adjust_bitfield_address to get a reference to the correct half,
	    but doing so looks to adjust_bitfield_address as though we are
	    moving past the end of the original object, so it drops the
	    associated MEM_EXPR and MEM_OFFSET.  Removing the check
	    causes store_bit_field to keep a 128-bit memory reference,
	    so that the final bitfield reference still has a MEM_EXPR
	    and MEM_OFFSET.  */
	 && GET_MODE_ALIGNMENT (mode) <= align
	 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
    {
      *best_mode = mode;
      found = true;
      if (iter.prefer_smaller_modes ())
	break;
    }

  return found;
}
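
/* A typical use, sketched after the expmed.cc callers (illustrative,
   not code from this file), when accessing a bitfield in a MEM rtx:

     scalar_int_mode best_mode;
     if (get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (mem),
			BITS_PER_WORD, MEM_VOLATILE_P (mem), &best_mode))
       ... access the field through BEST_MODE ...

   Passing 0 for both region bounds lets the iterator derive a safe
   default region from the alignment, as described above.  */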

/* Gets minimal and maximal values for MODE (signed or unsigned depending on
   SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (scalar_int_mode mode, int sign,
		 scalar_int_mode target_mode,
		 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_PRECISION (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  /* Special case BImode, which has values 0 and STORE_FLAG_VALUE.  */
  if (mode == BImode)
    {
      if (STORE_FLAG_VALUE < 0)
	{
	  min_val = STORE_FLAG_VALUE;
	  max_val = 0;
	}
      else
	{
	  min_val = 0;
	  max_val = STORE_FLAG_VALUE;
	}
    }
  else if (sign)
    {
      min_val = -(HOST_WIDE_INT_1U << (size - 1));
      max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      /* Shift in two steps so that SIZE == HOST_BITS_PER_WIDE_INT does
	 not shift by the full width, which would be undefined.  */
      max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}
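
/* For example, for signed QImode the computed bounds are -128 and 127;
   requesting them in SImode (a plausible TARGET_MODE for use in a
   comparison) returns the equivalent CONST_INTs, with gen_int_mode
   canonicalizing the bit patterns for SImode.  */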

#include "gt-stor-layout.h"