1 | /* Basic IPA utilities for type inheritance graph construction and |
2 | devirtualization. |
3 | Copyright (C) 2013-2024 Free Software Foundation, Inc. |
4 | Contributed by Jan Hubicka |
5 | |
6 | This file is part of GCC. |
7 | |
8 | GCC is free software; you can redistribute it and/or modify it under |
9 | the terms of the GNU General Public License as published by the Free |
10 | Software Foundation; either version 3, or (at your option) any later |
11 | version. |
12 | |
13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
16 | for more details. |
17 | |
18 | You should have received a copy of the GNU General Public License |
19 | along with GCC; see the file COPYING3. If not see |
20 | <http://www.gnu.org/licenses/>. */ |
21 | |
22 | /* Brief vocabulary: |
23 | ODR = One Definition Rule |
24 | In short, the ODR states that: |
25 | 1 In any translation unit, a template, type, function, or object can |
26 | have no more than one definition. Some of these can have any number |
27 | of declarations. A definition provides an instance. |
28 | 2 In the entire program, an object or non-inline function cannot have |
29 | more than one definition; if an object or function is used, it must |
30 | have exactly one definition. You can declare an object or function |
31 | that is never used, in which case you don't have to provide |
32 | a definition. In no event can there be more than one definition. |
33 | 3 Some things, like types, templates, and extern inline functions, can |
34 | be defined in more than one translation unit. For a given entity, |
35 | each definition must be the same. Non-extern objects and functions |
36 | in different translation units are different entities, even if their |
37 | names and types are the same. |
38 | |
39 | OTR = OBJ_TYPE_REF |
40 | This is the Gimple representation of type information of a polymorphic call. |
41 | It contains two parameters: |
42 | otr_type is a type of class whose method is called. |
43 | otr_token is the index into virtual table where address is taken. |
44 | |
45 | BINFO |
46 | This is the type inheritance information attached to each tree |
47 | RECORD_TYPE by the C++ frontend. It provides information about base |
48 | types and virtual tables. |
49 | |
50 | BINFO is linked to the RECORD_TYPE by TYPE_BINFO. |
51 | BINFO also links to its type by BINFO_TYPE and to the virtual table by |
52 | BINFO_VTABLE. |
53 | |
     Base types of a given type are enumerated by the BINFO_BASE_BINFO
     vector.  Members of these vectors are not the BINFOs associated
     with the base types themselves.  Rather they are new copies of those
     BINFOs (base BINFOs).  Their virtual tables may differ from the
     virtual table of the base type.  Also BINFO_OFFSET specifies the
     offset of the base within the type.
60 | |
     In the case of single inheritance, the virtual table is shared
     and BINFO_VTABLE of the base BINFO is NULL.  In the case of multiple
     inheritance the individual virtual tables are pointed to by
     BINFO_VTABLE of the base BINFOs (which differ from the BINFO_VTABLE
     of the BINFO associated with the base type itself).
66 | |
67 | BINFO lookup for a given base type and offset can be done by |
68 | get_binfo_at_offset. It returns proper BINFO whose virtual table |
69 | can be used for lookup of virtual methods associated with the |
70 | base type. |
71 | |
72 | token |
73 | This is an index of virtual method in virtual table associated |
74 | to the type defining it. Token can be looked up from OBJ_TYPE_REF |
75 | or from DECL_VINDEX of a given virtual table. |
76 | |
77 | polymorphic (indirect) call |
78 | This is callgraph representation of virtual method call. Every |
79 | polymorphic call contains otr_type and otr_token taken from |
80 | original OBJ_TYPE_REF at callgraph construction time. |
81 | |
82 | What we do here: |
83 | |
84 | build_type_inheritance_graph triggers a construction of the type inheritance |
85 | graph. |
86 | |
87 | We reconstruct it based on types of methods we see in the unit. |
88 | This means that the graph is not complete. Types with no methods are not |
89 | inserted into the graph. Also types without virtual methods are not |
90 | represented at all, though it may be easy to add this. |
91 | |
92 | The inheritance graph is represented as follows: |
93 | |
94 | Vertices are structures odr_type. Every odr_type may correspond |
95 | to one or more tree type nodes that are equivalent by ODR rule. |
96 | (the multiple type nodes appear only with linktime optimization) |
97 | |
98 | Edges are represented by odr_type->base and odr_type->derived_types. |
99 | At the moment we do not track offsets of types for multiple inheritance. |
100 | Adding this is easy. |
101 | |
   possible_polymorphic_call_targets returns, given the parameters found on an
   indirect polymorphic edge, all possible polymorphic call targets of the call.
104 | |
105 | pass_ipa_devirt performs simple speculative devirtualization. |
106 | */ |
107 | |
108 | #include "config.h" |
109 | #include "system.h" |
110 | #include "coretypes.h" |
111 | #include "backend.h" |
112 | #include "rtl.h" |
113 | #include "tree.h" |
114 | #include "gimple.h" |
115 | #include "alloc-pool.h" |
116 | #include "tree-pass.h" |
117 | #include "cgraph.h" |
118 | #include "lto-streamer.h" |
119 | #include "fold-const.h" |
120 | #include "print-tree.h" |
121 | #include "calls.h" |
122 | #include "ipa-utils.h" |
123 | #include "gimple-iterator.h" |
124 | #include "gimple-fold.h" |
125 | #include "symbol-summary.h" |
126 | #include "tree-vrp.h" |
127 | #include "sreal.h" |
128 | #include "ipa-cp.h" |
129 | #include "ipa-prop.h" |
130 | #include "ipa-fnsummary.h" |
131 | #include "demangle.h" |
132 | #include "dbgcnt.h" |
133 | #include "gimple-pretty-print.h" |
134 | #include "intl.h" |
135 | #include "stringpool.h" |
136 | #include "attribs.h" |
137 | #include "data-streamer.h" |
138 | #include "lto-streamer.h" |
139 | #include "streamer-hooks.h" |
140 | |
/* Hash based set of pairs of types.  Used to memoize pairs already being
   compared during recursive ODR structural-equality checks.  */
struct type_pair
{
  /* The two types of the pair; callers normalize the order by TYPE_UID
     before insertion so lookups are symmetric.  */
  tree first;
  tree second;
};
147 | |
148 | template <> |
149 | struct default_hash_traits <type_pair> |
150 | : typed_noop_remove <type_pair> |
151 | { |
152 | GTY((skip)) typedef type_pair value_type; |
153 | GTY((skip)) typedef type_pair compare_type; |
154 | static hashval_t |
155 | hash (type_pair p) |
156 | { |
157 | return TYPE_UID (p.first) ^ TYPE_UID (p.second); |
158 | } |
159 | static const bool empty_zero_p = true; |
160 | static bool |
161 | is_empty (type_pair p) |
162 | { |
163 | return p.first == NULL; |
164 | } |
165 | static bool |
166 | is_deleted (type_pair p ATTRIBUTE_UNUSED) |
167 | { |
168 | return false; |
169 | } |
170 | static bool |
171 | equal (const type_pair &a, const type_pair &b) |
172 | { |
173 | return a.first==b.first && a.second == b.second; |
174 | } |
175 | static void |
176 | mark_empty (type_pair &e) |
177 | { |
178 | e.first = NULL; |
179 | } |
180 | }; |
181 | |
182 | /* HACK alert: this is used to communicate with ipa-inline-transform that |
183 | thunk is being expanded and there is no need to clear the polymorphic |
184 | call target cache. */ |
185 | bool thunk_expansion; |
186 | |
187 | static bool odr_types_equivalent_p (tree, tree, bool, bool *, |
188 | hash_set<type_pair> *, |
189 | location_t, location_t); |
190 | static void warn_odr (tree t1, tree t2, tree st1, tree st2, |
191 | bool warn, bool *warned, const char *reason); |
192 | |
193 | static bool odr_violation_reported = false; |
194 | |
195 | |
196 | /* Pointer set of all call targets appearing in the cache. */ |
197 | static hash_set<cgraph_node *> *cached_polymorphic_call_targets; |
198 | |
/* The node of type inheritance graph.  For each type unique in
   One Definition Rule (ODR) sense, we produce one node linking all
   main variants of types equivalent to it, bases and derived types.  */

struct GTY(()) odr_type_d
{
  /* leader type.  */
  tree type;
  /* All bases; built only for main variants of types.  */
  vec<odr_type> GTY((skip)) bases;
  /* All derived types with virtual methods seen in unit;
     built only for main variants of types.  */
  vec<odr_type> GTY((skip)) derived_types;

  /* All equivalent types, if more than one.  */
  vec<tree, va_gc> *types;
  /* Set of all equivalent types, if NON-NULL.  */
  hash_set<tree> * GTY((skip)) types_set;

  /* Unique ID indexing the type in odr_types array.  */
  int id;
  /* Is it in anonymous namespace?  */
  bool anonymous_namespace;
  /* Do we know about all derivations of given type?  */
  bool all_derivations_known;
  /* Did we report ODR violation here?  */
  bool odr_violated;
  /* Set when a virtual table without RTTI prevailed over a table with it;
     RTTI will then not work for this type (see compare_virtual_tables).  */
  bool rtti_broken;
  /* Set when the canonical type is determined using the type name.  */
  bool tbaa_enabled;
};
231 | |
232 | /* Return TRUE if all derived types of T are known and thus |
233 | we may consider the walk of derived type complete. |
234 | |
235 | This is typically true only for final anonymous namespace types and types |
236 | defined within functions (that may be COMDAT and thus shared across units, |
237 | but with the same set of derived types). */ |
238 | |
239 | bool |
240 | type_all_derivations_known_p (const_tree t) |
241 | { |
242 | if (TYPE_FINAL_P (t)) |
243 | return true; |
244 | if (flag_ltrans) |
245 | return false; |
246 | /* Non-C++ types may have IDENTIFIER_NODE here, do not crash. */ |
247 | if (!TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL) |
248 | return true; |
249 | if (type_in_anonymous_namespace_p (t)) |
250 | return true; |
251 | return (decl_function_context (TYPE_NAME (t)) != NULL); |
252 | } |
253 | |
254 | /* Return TRUE if type's constructors are all visible. */ |
255 | |
256 | static bool |
257 | type_all_ctors_visible_p (tree t) |
258 | { |
259 | return !flag_ltrans |
260 | && symtab->state >= CONSTRUCTION |
261 | /* We cannot always use type_all_derivations_known_p. |
262 | For function local types we must assume case where |
263 | the function is COMDAT and shared in between units. |
264 | |
265 | TODO: These cases are quite easy to get, but we need |
266 | to keep track of C++ privatizing via -Wno-weak |
267 | as well as the IPA privatizing. */ |
268 | && type_in_anonymous_namespace_p (t); |
269 | } |
270 | |
271 | /* Return TRUE if type may have instance. */ |
272 | |
273 | static bool |
274 | type_possibly_instantiated_p (tree t) |
275 | { |
276 | tree vtable; |
277 | varpool_node *vnode; |
278 | |
279 | /* TODO: Add abstract types here. */ |
280 | if (!type_all_ctors_visible_p (t)) |
281 | return true; |
282 | |
283 | vtable = BINFO_VTABLE (TYPE_BINFO (t)); |
284 | if (TREE_CODE (vtable) == POINTER_PLUS_EXPR) |
285 | vtable = TREE_OPERAND (TREE_OPERAND (vtable, 0), 0); |
286 | vnode = varpool_node::get (decl: vtable); |
287 | return vnode && vnode->definition; |
288 | } |
289 | |
290 | /* Return true if T or type derived from T may have instance. */ |
291 | |
292 | static bool |
293 | type_or_derived_type_possibly_instantiated_p (odr_type t) |
294 | { |
295 | if (type_possibly_instantiated_p (t: t->type)) |
296 | return true; |
297 | for (auto derived : t->derived_types) |
298 | if (type_or_derived_type_possibly_instantiated_p (t: derived)) |
299 | return true; |
300 | return false; |
301 | } |
302 | |
/* Hash used to unify ODR types based on their mangled name and for anonymous
   namespace types.  */

struct odr_name_hasher : pointer_hash <odr_type_d>
{
  /* Lookups are keyed by a plain tree type node rather than odr_type_d.  */
  typedef union tree_node *compare_type;
  static inline hashval_t hash (const odr_type_d *);
  static inline bool equal (const odr_type_d *, const tree_node *);
  static inline void remove (odr_type_d *);
};
313 | |
314 | static bool |
315 | can_be_name_hashed_p (tree t) |
316 | { |
317 | return (!in_lto_p || odr_type_p (t)); |
318 | } |
319 | |
320 | /* Hash type by its ODR name. */ |
321 | |
322 | static hashval_t |
323 | hash_odr_name (const_tree t) |
324 | { |
325 | gcc_checking_assert (TYPE_MAIN_VARIANT (t) == t); |
326 | |
327 | /* If not in LTO, all main variants are unique, so we can do |
328 | pointer hash. */ |
329 | if (!in_lto_p) |
330 | return htab_hash_pointer (t); |
331 | |
332 | /* Anonymous types are unique. */ |
333 | if (type_with_linkage_p (t) && type_in_anonymous_namespace_p (t)) |
334 | return htab_hash_pointer (t); |
335 | |
336 | gcc_checking_assert (TYPE_NAME (t) |
337 | && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t))); |
338 | return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (TYPE_NAME (t))); |
339 | } |
340 | |
341 | /* Return the computed hashcode for ODR_TYPE. */ |
342 | |
343 | inline hashval_t |
344 | odr_name_hasher::hash (const odr_type_d *odr_type) |
345 | { |
346 | return hash_odr_name (t: odr_type->type); |
347 | } |
348 | |
349 | /* For languages with One Definition Rule, work out if |
350 | types are the same based on their name. |
351 | |
352 | This is non-trivial for LTO where minor differences in |
353 | the type representation may have prevented type merging |
354 | to merge two copies of otherwise equivalent type. |
355 | |
356 | Until we start streaming mangled type names, this function works |
357 | only for polymorphic types. |
358 | */ |
359 | |
360 | bool |
361 | types_same_for_odr (const_tree type1, const_tree type2) |
362 | { |
363 | gcc_checking_assert (TYPE_P (type1) && TYPE_P (type2)); |
364 | |
365 | type1 = TYPE_MAIN_VARIANT (type1); |
366 | type2 = TYPE_MAIN_VARIANT (type2); |
367 | |
368 | if (type1 == type2) |
369 | return true; |
370 | |
371 | if (!in_lto_p) |
372 | return false; |
373 | |
374 | /* Anonymous namespace types are never duplicated. */ |
375 | if ((type_with_linkage_p (t: type1) && type_in_anonymous_namespace_p (t: type1)) |
376 | || (type_with_linkage_p (t: type2) && type_in_anonymous_namespace_p (t: type2))) |
377 | return false; |
378 | |
379 | /* If both type has mangled defined check if they are same. |
380 | Watch for anonymous types which are all mangled as "<anon">. */ |
381 | if (!type_with_linkage_p (t: type1) || !type_with_linkage_p (t: type2)) |
382 | return false; |
383 | if (type_in_anonymous_namespace_p (t: type1) |
384 | || type_in_anonymous_namespace_p (t: type2)) |
385 | return false; |
386 | return (DECL_ASSEMBLER_NAME (TYPE_NAME (type1)) |
387 | == DECL_ASSEMBLER_NAME (TYPE_NAME (type2))); |
388 | } |
389 | |
390 | /* Return true if we can decide on ODR equivalency. |
391 | |
392 | In non-LTO it is always decide, in LTO however it depends in the type has |
393 | ODR info attached. */ |
394 | |
395 | bool |
396 | types_odr_comparable (tree t1, tree t2) |
397 | { |
398 | return (!in_lto_p |
399 | || TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2) |
400 | || (odr_type_p (TYPE_MAIN_VARIANT (t1)) |
401 | && odr_type_p (TYPE_MAIN_VARIANT (t2)))); |
402 | } |
403 | |
404 | /* Return true if T1 and T2 are ODR equivalent. If ODR equivalency is not |
405 | known, be conservative and return false. */ |
406 | |
407 | bool |
408 | types_must_be_same_for_odr (tree t1, tree t2) |
409 | { |
410 | if (types_odr_comparable (t1, t2)) |
411 | return types_same_for_odr (type1: t1, type2: t2); |
412 | else |
413 | return TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2); |
414 | } |
415 | |
416 | /* If T is compound type, return type it is based on. */ |
417 | |
418 | static tree |
419 | compound_type_base (const_tree t) |
420 | { |
421 | if (TREE_CODE (t) == ARRAY_TYPE |
422 | || POINTER_TYPE_P (t) |
423 | || TREE_CODE (t) == COMPLEX_TYPE |
424 | || VECTOR_TYPE_P (t)) |
425 | return TREE_TYPE (t); |
426 | if (TREE_CODE (t) == METHOD_TYPE) |
427 | return TYPE_METHOD_BASETYPE (t); |
428 | if (TREE_CODE (t) == OFFSET_TYPE) |
429 | return TYPE_OFFSET_BASETYPE (t); |
430 | return NULL_TREE; |
431 | } |
432 | |
433 | /* Return true if T is either ODR type or compound type based from it. |
434 | If the function return true, we know that T is a type originating from C++ |
435 | source even at link-time. */ |
436 | |
437 | bool |
438 | odr_or_derived_type_p (const_tree t) |
439 | { |
440 | do |
441 | { |
442 | if (odr_type_p (TYPE_MAIN_VARIANT (t))) |
443 | return true; |
444 | /* Function type is a tricky one. Basically we can consider it |
445 | ODR derived if return type or any of the parameters is. |
446 | We need to check all parameters because LTO streaming merges |
447 | common types (such as void) and they are not considered ODR then. */ |
448 | if (TREE_CODE (t) == FUNCTION_TYPE) |
449 | { |
450 | if (TYPE_METHOD_BASETYPE (t)) |
451 | t = TYPE_METHOD_BASETYPE (t); |
452 | else |
453 | { |
454 | if (TREE_TYPE (t) && odr_or_derived_type_p (TREE_TYPE (t))) |
455 | return true; |
456 | for (t = TYPE_ARG_TYPES (t); t; t = TREE_CHAIN (t)) |
457 | if (odr_or_derived_type_p (TYPE_MAIN_VARIANT (TREE_VALUE (t)))) |
458 | return true; |
459 | return false; |
460 | } |
461 | } |
462 | else |
463 | t = compound_type_base (t); |
464 | } |
465 | while (t); |
466 | return t; |
467 | } |
468 | |
469 | /* Compare types T1 and T2 and return true if they are |
470 | equivalent. */ |
471 | |
472 | inline bool |
473 | odr_name_hasher::equal (const odr_type_d *o1, const tree_node *t2) |
474 | { |
475 | tree t1 = o1->type; |
476 | |
477 | gcc_checking_assert (TYPE_MAIN_VARIANT (t2) == t2); |
478 | gcc_checking_assert (TYPE_MAIN_VARIANT (t1) == t1); |
479 | if (t1 == t2) |
480 | return true; |
481 | if (!in_lto_p) |
482 | return false; |
483 | /* Check for anonymous namespaces. */ |
484 | if ((type_with_linkage_p (t: t1) && type_in_anonymous_namespace_p (t: t1)) |
485 | || (type_with_linkage_p (t: t2) && type_in_anonymous_namespace_p (t: t2))) |
486 | return false; |
487 | gcc_checking_assert (DECL_ASSEMBLER_NAME (TYPE_NAME (t1))); |
488 | gcc_checking_assert (DECL_ASSEMBLER_NAME (TYPE_NAME (t2))); |
489 | return (DECL_ASSEMBLER_NAME (TYPE_NAME (t1)) |
490 | == DECL_ASSEMBLER_NAME (TYPE_NAME (t2))); |
491 | } |
492 | |
493 | /* Free ODR type V. */ |
494 | |
495 | inline void |
496 | odr_name_hasher::remove (odr_type_d *v) |
497 | { |
498 | v->bases.release (); |
499 | v->derived_types.release (); |
500 | if (v->types_set) |
501 | delete v->types_set; |
502 | ggc_free (v); |
503 | } |
504 | |
505 | /* ODR type hash used to look up ODR type based on tree type node. */ |
506 | |
507 | typedef hash_table<odr_name_hasher> odr_hash_type; |
508 | static odr_hash_type *odr_hash; |
509 | |
510 | /* ODR types are also stored into ODR_TYPE vector to allow consistent |
511 | walking. Bases appear before derived types. Vector is garbage collected |
512 | so we won't end up visiting empty types. */ |
513 | |
514 | static GTY(()) vec <odr_type, va_gc> *odr_types_ptr; |
515 | #define odr_types (*odr_types_ptr) |
516 | |
517 | /* All enums defined and accessible for the unit. */ |
518 | static GTY(()) vec <tree, va_gc> *odr_enums; |
519 | |
/* Information we hold about value defined by an enum type.  */
struct odr_enum_val
{
  /* Name of the enumerator.  */
  const char *name;
  /* Its constant value.  */
  wide_int val;
  /* Source location of the enumerator definition.  */
  location_t locus;
};
527 | |
/* Information about enum values.  */
struct odr_enum
{
  /* Source location of the enum definition.  */
  location_t locus;
  /* All enumerator values of the enum.  */
  auto_vec<odr_enum_val, 0> vals;
  /* Did we already warn about this enum?  */
  bool warned;
};
535 | |
536 | /* A table of all ODR enum definitions. */ |
537 | static hash_map <nofree_string_hash, odr_enum> *odr_enum_map = NULL; |
538 | static struct obstack odr_enum_obstack; |
539 | |
540 | /* Set TYPE_BINFO of TYPE and its variants to BINFO. */ |
541 | void |
542 | set_type_binfo (tree type, tree binfo) |
543 | { |
544 | for (; type; type = TYPE_NEXT_VARIANT (type)) |
545 | if (COMPLETE_TYPE_P (type)) |
546 | TYPE_BINFO (type) = binfo; |
547 | else |
548 | gcc_assert (!TYPE_BINFO (type)); |
549 | } |
550 | |
551 | /* Return true if type variants match. |
552 | This assumes that we already verified that T1 and T2 are variants of the |
553 | same type. */ |
554 | |
555 | static bool |
556 | type_variants_equivalent_p (tree t1, tree t2) |
557 | { |
558 | if (TYPE_QUALS (t1) != TYPE_QUALS (t2)) |
559 | return false; |
560 | |
561 | if (comp_type_attributes (t1, t2) != 1) |
562 | return false; |
563 | |
564 | if (COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2) |
565 | && TYPE_ALIGN (t1) != TYPE_ALIGN (t2)) |
566 | return false; |
567 | |
568 | return true; |
569 | } |
570 | |
571 | /* Compare T1 and T2 based on name or structure. */ |
572 | |
573 | static bool |
574 | odr_subtypes_equivalent_p (tree t1, tree t2, |
575 | hash_set<type_pair> *visited, |
576 | location_t loc1, location_t loc2) |
577 | { |
578 | |
579 | /* This can happen in incomplete types that should be handled earlier. */ |
580 | gcc_assert (t1 && t2); |
581 | |
582 | if (t1 == t2) |
583 | return true; |
584 | |
585 | /* Anonymous namespace types must match exactly. */ |
586 | if ((type_with_linkage_p (TYPE_MAIN_VARIANT (t1)) |
587 | && type_in_anonymous_namespace_p (TYPE_MAIN_VARIANT (t1))) |
588 | || (type_with_linkage_p (TYPE_MAIN_VARIANT (t2)) |
589 | && type_in_anonymous_namespace_p (TYPE_MAIN_VARIANT (t2)))) |
590 | return false; |
591 | |
592 | /* For ODR types be sure to compare their names. |
593 | To support -Wno-odr-type-merging we allow one type to be non-ODR |
594 | and other ODR even though it is a violation. */ |
595 | if (types_odr_comparable (t1, t2)) |
596 | { |
597 | if (t1 != t2 |
598 | && odr_type_p (TYPE_MAIN_VARIANT (t1)) |
599 | && get_odr_type (TYPE_MAIN_VARIANT (t1), insert: true)->odr_violated) |
600 | return false; |
601 | if (!types_same_for_odr (type1: t1, type2: t2)) |
602 | return false; |
603 | if (!type_variants_equivalent_p (t1, t2)) |
604 | return false; |
605 | /* Limit recursion: If subtypes are ODR types and we know |
606 | that they are same, be happy. */ |
607 | if (odr_type_p (TYPE_MAIN_VARIANT (t1))) |
608 | return true; |
609 | } |
610 | |
611 | /* Component types, builtins and possibly violating ODR types |
612 | have to be compared structurally. */ |
613 | if (TREE_CODE (t1) != TREE_CODE (t2)) |
614 | return false; |
615 | if (AGGREGATE_TYPE_P (t1) |
616 | && (TYPE_NAME (t1) == NULL_TREE) != (TYPE_NAME (t2) == NULL_TREE)) |
617 | return false; |
618 | |
619 | type_pair pair={TYPE_MAIN_VARIANT (t1), TYPE_MAIN_VARIANT (t2)}; |
620 | if (TYPE_UID (TYPE_MAIN_VARIANT (t1)) > TYPE_UID (TYPE_MAIN_VARIANT (t2))) |
621 | { |
622 | pair.first = TYPE_MAIN_VARIANT (t2); |
623 | pair.second = TYPE_MAIN_VARIANT (t1); |
624 | } |
625 | if (visited->add (k: pair)) |
626 | return true; |
627 | if (!odr_types_equivalent_p (TYPE_MAIN_VARIANT (t1), TYPE_MAIN_VARIANT (t2), |
628 | false, NULL, visited, loc1, loc2)) |
629 | return false; |
630 | if (!type_variants_equivalent_p (t1, t2)) |
631 | return false; |
632 | return true; |
633 | } |
634 | |
635 | /* Return true if DECL1 and DECL2 are identical methods. Consider |
636 | name equivalent to name.localalias.xyz. */ |
637 | |
638 | static bool |
639 | methods_equal_p (tree decl1, tree decl2) |
640 | { |
641 | if (DECL_ASSEMBLER_NAME (decl1) == DECL_ASSEMBLER_NAME (decl2)) |
642 | return true; |
643 | const char sep = symbol_table::symbol_suffix_separator (); |
644 | |
645 | const char *name1 = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl1)); |
646 | const char *ptr1 = strchr (s: name1, c: sep); |
647 | int len1 = ptr1 ? ptr1 - name1 : strlen (s: name1); |
648 | |
649 | const char *name2 = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl2)); |
650 | const char *ptr2 = strchr (s: name2, c: sep); |
651 | int len2 = ptr2 ? ptr2 - name2 : strlen (s: name2); |
652 | |
653 | if (len1 != len2) |
654 | return false; |
655 | return !strncmp (s1: name1, s2: name2, n: len1); |
656 | } |
657 | |
658 | /* Compare two virtual tables, PREVAILING and VTABLE and output ODR |
659 | violation warnings. */ |
660 | |
661 | void |
662 | compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable) |
663 | { |
664 | int n1, n2; |
665 | |
666 | if (DECL_VIRTUAL_P (prevailing->decl) != DECL_VIRTUAL_P (vtable->decl)) |
667 | { |
668 | odr_violation_reported = true; |
669 | if (DECL_VIRTUAL_P (prevailing->decl)) |
670 | { |
671 | varpool_node *tmp = prevailing; |
672 | prevailing = vtable; |
673 | vtable = tmp; |
674 | } |
675 | auto_diagnostic_group d; |
676 | if (warning_at (DECL_SOURCE_LOCATION |
677 | (TYPE_NAME (DECL_CONTEXT (vtable->decl))), |
678 | OPT_Wodr, |
679 | "virtual table of type %qD violates one definition rule" , |
680 | DECL_CONTEXT (vtable->decl))) |
681 | inform (DECL_SOURCE_LOCATION (prevailing->decl), |
682 | "variable of same assembler name as the virtual table is " |
683 | "defined in another translation unit" ); |
684 | return; |
685 | } |
686 | if (!prevailing->definition || !vtable->definition) |
687 | return; |
688 | |
689 | /* If we do not stream ODR type info, do not bother to do useful compare. */ |
690 | if (!TYPE_BINFO (DECL_CONTEXT (vtable->decl)) |
691 | || !polymorphic_type_binfo_p (TYPE_BINFO (DECL_CONTEXT (vtable->decl)))) |
692 | return; |
693 | |
694 | odr_type class_type = get_odr_type (DECL_CONTEXT (vtable->decl), insert: true); |
695 | |
696 | if (class_type->odr_violated) |
697 | return; |
698 | |
699 | for (n1 = 0, n2 = 0; true; n1++, n2++) |
700 | { |
701 | struct ipa_ref *ref1, *ref2; |
702 | bool end1, end2; |
703 | |
704 | end1 = !prevailing->iterate_reference (i: n1, ref&: ref1); |
705 | end2 = !vtable->iterate_reference (i: n2, ref&: ref2); |
706 | |
707 | /* !DECL_VIRTUAL_P means RTTI entry; |
708 | We warn when RTTI is lost because non-RTTI prevails; we silently |
709 | accept the other case. */ |
710 | while (!end2 |
711 | && (end1 |
712 | || (methods_equal_p (decl1: ref1->referred->decl, |
713 | decl2: ref2->referred->decl) |
714 | && TREE_CODE (ref1->referred->decl) == FUNCTION_DECL)) |
715 | && TREE_CODE (ref2->referred->decl) != FUNCTION_DECL) |
716 | { |
717 | if (!class_type->rtti_broken) |
718 | { |
719 | auto_diagnostic_group d; |
720 | if (warning_at (DECL_SOURCE_LOCATION |
721 | (TYPE_NAME (DECL_CONTEXT (vtable->decl))), |
722 | OPT_Wodr, |
723 | "virtual table of type %qD contains RTTI " |
724 | "information" , |
725 | DECL_CONTEXT (vtable->decl))) |
726 | { |
727 | inform (DECL_SOURCE_LOCATION |
728 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
729 | "but is prevailed by one without from other" |
730 | " translation unit" ); |
731 | inform (DECL_SOURCE_LOCATION |
732 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
733 | "RTTI will not work on this type" ); |
734 | class_type->rtti_broken = true; |
735 | } |
736 | } |
737 | n2++; |
738 | end2 = !vtable->iterate_reference (i: n2, ref&: ref2); |
739 | } |
740 | while (!end1 |
741 | && (end2 |
742 | || (methods_equal_p (decl1: ref2->referred->decl, decl2: ref1->referred->decl) |
743 | && TREE_CODE (ref2->referred->decl) == FUNCTION_DECL)) |
744 | && TREE_CODE (ref1->referred->decl) != FUNCTION_DECL) |
745 | { |
746 | n1++; |
747 | end1 = !prevailing->iterate_reference (i: n1, ref&: ref1); |
748 | } |
749 | |
750 | /* Finished? */ |
751 | if (end1 && end2) |
752 | { |
753 | /* Extra paranoia; compare the sizes. We do not have information |
754 | about virtual inheritance offsets, so just be sure that these |
755 | match. |
756 | Do this as very last check so the not very informative error |
757 | is not output too often. */ |
758 | if (DECL_SIZE (prevailing->decl) != DECL_SIZE (vtable->decl)) |
759 | { |
760 | class_type->odr_violated = true; |
761 | auto_diagnostic_group d; |
762 | tree ctx = TYPE_NAME (DECL_CONTEXT (vtable->decl)); |
763 | if (warning_at (DECL_SOURCE_LOCATION (ctx), OPT_Wodr, |
764 | "virtual table of type %qD violates " |
765 | "one definition rule" , |
766 | DECL_CONTEXT (vtable->decl))) |
767 | { |
768 | ctx = TYPE_NAME (DECL_CONTEXT (prevailing->decl)); |
769 | inform (DECL_SOURCE_LOCATION (ctx), |
770 | "the conflicting type defined in another translation" |
771 | " unit has virtual table of different size" ); |
772 | } |
773 | } |
774 | return; |
775 | } |
776 | |
777 | if (!end1 && !end2) |
778 | { |
779 | if (methods_equal_p (decl1: ref1->referred->decl, decl2: ref2->referred->decl)) |
780 | continue; |
781 | |
782 | class_type->odr_violated = true; |
783 | |
784 | /* If the loops above stopped on non-virtual pointer, we have |
785 | mismatch in RTTI information mangling. */ |
786 | if (TREE_CODE (ref1->referred->decl) != FUNCTION_DECL |
787 | && TREE_CODE (ref2->referred->decl) != FUNCTION_DECL) |
788 | { |
789 | auto_diagnostic_group d; |
790 | if (warning_at (DECL_SOURCE_LOCATION |
791 | (TYPE_NAME (DECL_CONTEXT (vtable->decl))), |
792 | OPT_Wodr, |
793 | "virtual table of type %qD violates " |
794 | "one definition rule" , |
795 | DECL_CONTEXT (vtable->decl))) |
796 | { |
797 | inform (DECL_SOURCE_LOCATION |
798 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
799 | "the conflicting type defined in another translation " |
800 | "unit with different RTTI information" ); |
801 | } |
802 | return; |
803 | } |
804 | /* At this point both REF1 and REF2 points either to virtual table |
805 | or virtual method. If one points to virtual table and other to |
806 | method we can complain the same way as if one table was shorter |
807 | than other pointing out the extra method. */ |
808 | if (TREE_CODE (ref1->referred->decl) |
809 | != TREE_CODE (ref2->referred->decl)) |
810 | { |
811 | if (VAR_P (ref1->referred->decl)) |
812 | end1 = true; |
813 | else if (VAR_P (ref2->referred->decl)) |
814 | end2 = true; |
815 | } |
816 | } |
817 | |
818 | class_type->odr_violated = true; |
819 | |
820 | /* Complain about size mismatch. Either we have too many virtual |
821 | functions or too many virtual table pointers. */ |
822 | if (end1 || end2) |
823 | { |
824 | if (end1) |
825 | { |
826 | varpool_node *tmp = prevailing; |
827 | prevailing = vtable; |
828 | vtable = tmp; |
829 | ref1 = ref2; |
830 | } |
831 | auto_diagnostic_group d; |
832 | if (warning_at (DECL_SOURCE_LOCATION |
833 | (TYPE_NAME (DECL_CONTEXT (vtable->decl))), |
834 | OPT_Wodr, |
835 | "virtual table of type %qD violates " |
836 | "one definition rule" , |
837 | DECL_CONTEXT (vtable->decl))) |
838 | { |
839 | if (TREE_CODE (ref1->referring->decl) == FUNCTION_DECL) |
840 | { |
841 | inform (DECL_SOURCE_LOCATION |
842 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
843 | "the conflicting type defined in another translation " |
844 | "unit" ); |
845 | inform (DECL_SOURCE_LOCATION |
846 | (TYPE_NAME (DECL_CONTEXT (ref1->referring->decl))), |
847 | "contains additional virtual method %qD" , |
848 | ref1->referred->decl); |
849 | } |
850 | else |
851 | { |
852 | inform (DECL_SOURCE_LOCATION |
853 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
854 | "the conflicting type defined in another translation " |
855 | "unit has virtual table with more entries" ); |
856 | } |
857 | } |
858 | return; |
859 | } |
860 | |
861 | /* And in the last case we have either mismatch in between two virtual |
862 | methods or two virtual table pointers. */ |
863 | auto_diagnostic_group d; |
864 | if (warning_at (DECL_SOURCE_LOCATION |
865 | (TYPE_NAME (DECL_CONTEXT (vtable->decl))), OPT_Wodr, |
866 | "virtual table of type %qD violates " |
867 | "one definition rule" , |
868 | DECL_CONTEXT (vtable->decl))) |
869 | { |
870 | if (TREE_CODE (ref1->referred->decl) == FUNCTION_DECL) |
871 | { |
872 | inform (DECL_SOURCE_LOCATION |
873 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
874 | "the conflicting type defined in another translation " |
875 | "unit" ); |
876 | gcc_assert (TREE_CODE (ref2->referred->decl) |
877 | == FUNCTION_DECL); |
878 | inform (DECL_SOURCE_LOCATION |
879 | (ref1->referred->ultimate_alias_target ()->decl), |
880 | "virtual method %qD" , |
881 | ref1->referred->ultimate_alias_target ()->decl); |
882 | inform (DECL_SOURCE_LOCATION |
883 | (ref2->referred->ultimate_alias_target ()->decl), |
884 | "ought to match virtual method %qD but does not" , |
885 | ref2->referred->ultimate_alias_target ()->decl); |
886 | } |
887 | else |
888 | inform (DECL_SOURCE_LOCATION |
889 | (TYPE_NAME (DECL_CONTEXT (prevailing->decl))), |
890 | "the conflicting type defined in another translation " |
891 | "unit has virtual table with different contents" ); |
892 | return; |
893 | } |
894 | } |
895 | } |
896 | |
897 | /* Output ODR violation warning about T1 and T2 with REASON. |
898 | Display location of ST1 and ST2 if REASON speaks about field or |
899 | method of the type. |
900 | If WARN is false, do nothing. Set WARNED if warning was indeed |
901 | output. */ |
902 | |
static void
warn_odr (tree t1, tree t2, tree st1, tree st2,
	  bool warn, bool *warned, const char *reason)
{
  /* DECL2 anchors the follow-up REASON note; it starts at the name of T2's
     main variant and may be redirected to ST2 (the conflicting member) below.  */
  tree decl2 = TYPE_NAME (TYPE_MAIN_VARIANT (t2));
  if (warned)
    *warned = false;

  /* Bail out when warnings are suppressed or when T1 has no name to anchor
     the diagnostic to.  */
  if (!warn || !TYPE_NAME(TYPE_MAIN_VARIANT (t1)))
    return;

  /* ODR warnings are output during LTO streaming; we must apply location
     cache for potential warnings to be output correctly.  */
  if (lto_location_cache::current_cache)
    lto_location_cache::current_cache->apply_location_cache ();

  /* Group the warning and all following notes into one diagnostic.  */
  auto_diagnostic_group d;
  /* If T1 is a named typedef variant, mention both the typedef and the
     underlying main variant; otherwise just name the type.  Either way,
     stop if the user suppressed the warning.  */
  if (t1 != TYPE_MAIN_VARIANT (t1)
      && TYPE_NAME (t1) != TYPE_NAME (TYPE_MAIN_VARIANT (t1)))
    {
      if (!warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (TYPE_MAIN_VARIANT (t1))),
		       OPT_Wodr, "type %qT (typedef of %qT) violates the "
		       "C++ One Definition Rule" ,
		       t1, TYPE_MAIN_VARIANT (t1)))
	return;
    }
  else
    {
      if (!warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (TYPE_MAIN_VARIANT (t1))),
		       OPT_Wodr, "type %qT violates the C++ One Definition Rule" ,
		       t1))
	return;
    }
  /* With no member decls, go straight to the REASON note.  */
  if (!st1 && !st2)
    ;
  /* For FIELD_DECL support also case where one of fields is
     NULL - this is used when the structures have mismatching number of
     elements.  */
  else if (!st1 || TREE_CODE (st1) == FIELD_DECL)
    {
      inform (DECL_SOURCE_LOCATION (decl2),
	      "a different type is defined in another translation unit" );
      /* Canonicalize so ST1 is the non-NULL field.  */
      if (!st1)
	{
	  st1 = st2;
	  st2 = NULL;
	}
      inform (DECL_SOURCE_LOCATION (st1),
	      "the first difference of corresponding definitions is field %qD" ,
	      st1);
      /* Point the final note at the conflicting field when we have it.  */
      if (st2)
	decl2 = st2;
    }
  else if (TREE_CODE (st1) == FUNCTION_DECL)
    {
      inform (DECL_SOURCE_LOCATION (decl2),
	      "a different type is defined in another translation unit" );
      inform (DECL_SOURCE_LOCATION (st1),
	      "the first difference of corresponding definitions is method %qD" ,
	      st1);
      decl2 = st2;
    }
  else
    /* Other decl kinds are not handled; emit nothing further and report
       no warning as "fully" output.  */
    return;
  /* Finally explain why the two definitions are considered different.  */
  inform (DECL_SOURCE_LOCATION (decl2), reason);

  if (warned)
    *warned = true;
}
972 | |
973 | /* Return true if T1 and T2 are incompatible and we want to recursively |
974 | dive into them from warn_type_mismatch to give sensible answer. */ |
975 | |
976 | static bool |
977 | type_mismatch_p (tree t1, tree t2) |
978 | { |
979 | if (odr_or_derived_type_p (t: t1) && odr_or_derived_type_p (t: t2) |
980 | && !odr_types_equivalent_p (type1: t1, type2: t2)) |
981 | return true; |
982 | return !types_compatible_p (type1: t1, type2: t2); |
983 | } |
984 | |
985 | |
/* Types T1 and T2 were found to be incompatible in a context where they
   cannot be (they were either used to declare a symbol of the same
   assembler name, or were unified by the ODR rule).  We already output
   a warning about this, but if possible, output extra information on how
   the types mismatch.
990 | |
991 | This is hard to do in general. We basically handle the common cases. |
992 | |
993 | If LOC1 and LOC2 are meaningful locations, use it in the case the types |
994 | themselves do not have one. */ |
995 | |
996 | void |
997 | warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2) |
998 | { |
999 | /* Location of type is known only if it has TYPE_NAME and the name is |
1000 | TYPE_DECL. */ |
1001 | location_t loc_t1 = TYPE_NAME (t1) && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL |
1002 | ? DECL_SOURCE_LOCATION (TYPE_NAME (t1)) |
1003 | : UNKNOWN_LOCATION; |
1004 | location_t loc_t2 = TYPE_NAME (t2) && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL |
1005 | ? DECL_SOURCE_LOCATION (TYPE_NAME (t2)) |
1006 | : UNKNOWN_LOCATION; |
1007 | bool loc_t2_useful = false; |
1008 | |
1009 | /* With LTO it is a common case that the location of both types match. |
1010 | See if T2 has a location that is different from T1. If so, we will |
1011 | inform user about the location. |
1012 | Do not consider the location passed to us in LOC1/LOC2 as those are |
1013 | already output. */ |
1014 | if (loc_t2 > BUILTINS_LOCATION && loc_t2 != loc_t1) |
1015 | { |
1016 | if (loc_t1 <= BUILTINS_LOCATION) |
1017 | loc_t2_useful = true; |
1018 | else |
1019 | { |
1020 | expanded_location xloc1 = expand_location (loc_t1); |
1021 | expanded_location xloc2 = expand_location (loc_t2); |
1022 | |
1023 | if (strcmp (s1: xloc1.file, s2: xloc2.file) |
1024 | || xloc1.line != xloc2.line |
1025 | || xloc1.column != xloc2.column) |
1026 | loc_t2_useful = true; |
1027 | } |
1028 | } |
1029 | |
1030 | if (loc_t1 <= BUILTINS_LOCATION) |
1031 | loc_t1 = loc1; |
1032 | if (loc_t2 <= BUILTINS_LOCATION) |
1033 | loc_t2 = loc2; |
1034 | |
1035 | location_t loc = loc_t1 <= BUILTINS_LOCATION ? loc_t2 : loc_t1; |
1036 | |
1037 | /* It is a quite common bug to reference anonymous namespace type in |
1038 | non-anonymous namespace class. */ |
1039 | tree mt1 = TYPE_MAIN_VARIANT (t1); |
1040 | tree mt2 = TYPE_MAIN_VARIANT (t2); |
1041 | if ((type_with_linkage_p (t: mt1) |
1042 | && type_in_anonymous_namespace_p (t: mt1)) |
1043 | || (type_with_linkage_p (t: mt2) |
1044 | && type_in_anonymous_namespace_p (t: mt2))) |
1045 | { |
1046 | if (!type_with_linkage_p (t: mt1) |
1047 | || !type_in_anonymous_namespace_p (t: mt1)) |
1048 | { |
1049 | std::swap (a&: t1, b&: t2); |
1050 | std::swap (a&: mt1, b&: mt2); |
1051 | std::swap (a&: loc_t1, b&: loc_t2); |
1052 | } |
1053 | gcc_assert (TYPE_NAME (mt1) |
1054 | && TREE_CODE (TYPE_NAME (mt1)) == TYPE_DECL); |
1055 | tree n1 = TYPE_NAME (mt1); |
1056 | tree n2 = TYPE_NAME (mt2) ? TYPE_NAME (mt2) : NULL; |
1057 | |
1058 | if (TREE_CODE (n1) == TYPE_DECL) |
1059 | n1 = DECL_NAME (n1); |
1060 | if (n2 && TREE_CODE (n2) == TYPE_DECL) |
1061 | n2 = DECL_NAME (n2); |
1062 | /* Most of the time, the type names will match, do not be unnecessarily |
1063 | verbose. */ |
1064 | if (n1 != n2) |
1065 | inform (loc_t1, |
1066 | "type %qT defined in anonymous namespace cannot match " |
1067 | "type %qT across the translation unit boundary" , |
1068 | t1, t2); |
1069 | else |
1070 | inform (loc_t1, |
1071 | "type %qT defined in anonymous namespace cannot match " |
1072 | "across the translation unit boundary" , |
1073 | t1); |
1074 | if (loc_t2_useful) |
1075 | inform (loc_t2, |
1076 | "the incompatible type defined in another translation unit" ); |
1077 | return; |
1078 | } |
1079 | /* If types have mangled ODR names and they are different, it is most |
1080 | informative to output those. |
1081 | This also covers types defined in different namespaces. */ |
1082 | const char *odr1 = get_odr_name_for_type (type: mt1); |
1083 | const char *odr2 = get_odr_name_for_type (type: mt2); |
1084 | if (odr1 != NULL && odr2 != NULL && odr1 != odr2) |
1085 | { |
1086 | const int opts = DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES; |
1087 | char *name1 = xstrdup (cplus_demangle (mangled: odr1, options: opts)); |
1088 | char *name2 = cplus_demangle (mangled: odr2, options: opts); |
1089 | if (name1 && name2 && strcmp (s1: name1, s2: name2)) |
1090 | { |
1091 | inform (loc_t1, |
1092 | "type name %qs should match type name %qs" , |
1093 | name1, name2); |
1094 | if (loc_t2_useful) |
1095 | inform (loc_t2, |
1096 | "the incompatible type is defined here" ); |
1097 | free (ptr: name1); |
1098 | return; |
1099 | } |
1100 | free (ptr: name1); |
1101 | } |
1102 | /* A tricky case are compound types. Often they appear the same in source |
1103 | code and the mismatch is dragged in by type they are build from. |
1104 | Look for those differences in subtypes and try to be informative. In other |
1105 | cases just output nothing because the source code is probably different |
1106 | and in this case we already output a all necessary info. */ |
1107 | if (!TYPE_NAME (t1) || !TYPE_NAME (t2)) |
1108 | { |
1109 | if (TREE_CODE (t1) == TREE_CODE (t2)) |
1110 | { |
1111 | if (TREE_CODE (t1) == ARRAY_TYPE |
1112 | && COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2)) |
1113 | { |
1114 | tree i1 = TYPE_DOMAIN (t1); |
1115 | tree i2 = TYPE_DOMAIN (t2); |
1116 | |
1117 | if (i1 && i2 |
1118 | && TYPE_MAX_VALUE (i1) |
1119 | && TYPE_MAX_VALUE (i2) |
1120 | && !operand_equal_p (TYPE_MAX_VALUE (i1), |
1121 | TYPE_MAX_VALUE (i2), flags: 0)) |
1122 | { |
1123 | inform (loc, |
1124 | "array types have different bounds" ); |
1125 | return; |
1126 | } |
1127 | } |
1128 | if ((POINTER_TYPE_P (t1) || TREE_CODE (t1) == ARRAY_TYPE) |
1129 | && type_mismatch_p (TREE_TYPE (t1), TREE_TYPE (t2))) |
1130 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1: loc_t1, loc2: loc_t2); |
1131 | else if (TREE_CODE (t1) == METHOD_TYPE |
1132 | || TREE_CODE (t1) == FUNCTION_TYPE) |
1133 | { |
1134 | tree parms1 = NULL, parms2 = NULL; |
1135 | int count = 1; |
1136 | |
1137 | if (type_mismatch_p (TREE_TYPE (t1), TREE_TYPE (t2))) |
1138 | { |
1139 | inform (loc, "return value type mismatch" ); |
1140 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1: loc_t1, |
1141 | loc2: loc_t2); |
1142 | return; |
1143 | } |
1144 | if (prototype_p (t1) && prototype_p (t2)) |
1145 | for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); |
1146 | parms1 && parms2; |
1147 | parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2), |
1148 | count++) |
1149 | { |
1150 | if (type_mismatch_p (TREE_VALUE (parms1), TREE_VALUE (parms2))) |
1151 | { |
1152 | if (count == 1 && TREE_CODE (t1) == METHOD_TYPE) |
1153 | inform (loc, |
1154 | "implicit this pointer type mismatch" ); |
1155 | else |
1156 | inform (loc, |
1157 | "type mismatch in parameter %i" , |
1158 | count - (TREE_CODE (t1) == METHOD_TYPE)); |
1159 | warn_types_mismatch (TREE_VALUE (parms1), |
1160 | TREE_VALUE (parms2), |
1161 | loc1: loc_t1, loc2: loc_t2); |
1162 | return; |
1163 | } |
1164 | } |
1165 | if (parms1 || parms2) |
1166 | { |
1167 | inform (loc, |
1168 | "types have different parameter counts" ); |
1169 | return; |
1170 | } |
1171 | } |
1172 | } |
1173 | return; |
1174 | } |
1175 | |
1176 | if (types_odr_comparable (t1, t2) |
1177 | /* We make assign integers mangled names to be able to handle |
1178 | signed/unsigned chars. Accepting them here would however lead to |
1179 | confusing message like |
1180 | "type ‘const int’ itself violates the C++ One Definition Rule" */ |
1181 | && TREE_CODE (t1) != INTEGER_TYPE |
1182 | && types_same_for_odr (type1: t1, type2: t2)) |
1183 | inform (loc_t1, |
1184 | "type %qT itself violates the C++ One Definition Rule" , t1); |
1185 | /* Prevent pointless warnings like "struct aa" should match "struct aa". */ |
1186 | else if (TYPE_NAME (t1) == TYPE_NAME (t2) |
1187 | && TREE_CODE (t1) == TREE_CODE (t2) && !loc_t2_useful) |
1188 | return; |
1189 | else |
1190 | inform (loc_t1, "type %qT should match type %qT" , |
1191 | t1, t2); |
1192 | if (loc_t2_useful) |
1193 | inform (loc_t2, "the incompatible type is defined here" ); |
1194 | } |
1195 | |
1196 | /* Return true if T should be ignored in TYPE_FIELDS for ODR comparison. */ |
1197 | |
1198 | static bool |
1199 | skip_in_fields_list_p (tree t) |
1200 | { |
1201 | if (TREE_CODE (t) != FIELD_DECL) |
1202 | return true; |
1203 | /* C++ FE introduces zero sized fields depending on -std setting, see |
1204 | PR89358. */ |
1205 | if (DECL_SIZE (t) |
1206 | && integer_zerop (DECL_SIZE (t)) |
1207 | && DECL_ARTIFICIAL (t) |
1208 | && DECL_IGNORED_P (t) |
1209 | && !DECL_NAME (t)) |
1210 | return true; |
1211 | return false; |
1212 | } |
1213 | |
1214 | /* Compare T1 and T2, report ODR violations if WARN is true and set |
1215 | WARNED to true if anything is reported. Return true if types match. |
1216 | If true is returned, the types are also compatible in the sense of |
1217 | gimple_canonical_types_compatible_p. |
   If LOC1 and LOC2 are not UNKNOWN_LOCATION, they may be used to output a
   warning about the type if the type itself does not have a location.  */
1220 | |
1221 | static bool |
1222 | odr_types_equivalent_p (tree t1, tree t2, bool warn, bool *warned, |
1223 | hash_set<type_pair> *visited, |
1224 | location_t loc1, location_t loc2) |
1225 | { |
1226 | /* If we are asked to warn, we need warned to keep track if warning was |
1227 | output. */ |
1228 | gcc_assert (!warn || warned); |
1229 | /* Check first for the obvious case of pointer identity. */ |
1230 | if (t1 == t2) |
1231 | return true; |
1232 | |
1233 | /* Can't be the same type if the types don't have the same code. */ |
1234 | if (TREE_CODE (t1) != TREE_CODE (t2)) |
1235 | { |
1236 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1237 | G_("a different type is defined in another translation unit" )); |
1238 | return false; |
1239 | } |
1240 | |
1241 | if ((type_with_linkage_p (TYPE_MAIN_VARIANT (t1)) |
1242 | && type_in_anonymous_namespace_p (TYPE_MAIN_VARIANT (t1))) |
1243 | || (type_with_linkage_p (TYPE_MAIN_VARIANT (t2)) |
1244 | && type_in_anonymous_namespace_p (TYPE_MAIN_VARIANT (t2)))) |
1245 | { |
1246 | /* We cannot trip this when comparing ODR types, only when trying to |
1247 | match different ODR derivations from different declarations. |
1248 | So WARN should be always false. */ |
1249 | gcc_assert (!warn); |
1250 | return false; |
1251 | } |
1252 | |
1253 | /* Non-aggregate types can be handled cheaply. */ |
1254 | if (INTEGRAL_TYPE_P (t1) |
1255 | || SCALAR_FLOAT_TYPE_P (t1) |
1256 | || FIXED_POINT_TYPE_P (t1) |
1257 | || VECTOR_TYPE_P (t1) |
1258 | || TREE_CODE (t1) == COMPLEX_TYPE |
1259 | || TREE_CODE (t1) == OFFSET_TYPE |
1260 | || POINTER_TYPE_P (t1)) |
1261 | { |
1262 | if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)) |
1263 | { |
1264 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1265 | G_("a type with different precision is defined " |
1266 | "in another translation unit" )); |
1267 | return false; |
1268 | } |
1269 | if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2)) |
1270 | { |
1271 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1272 | G_("a type with different signedness is defined " |
1273 | "in another translation unit" )); |
1274 | return false; |
1275 | } |
1276 | |
1277 | if (TREE_CODE (t1) == INTEGER_TYPE |
1278 | && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)) |
1279 | { |
1280 | /* char WRT uint_8? */ |
1281 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1282 | G_("a different type is defined in another " |
1283 | "translation unit" )); |
1284 | return false; |
1285 | } |
1286 | |
1287 | /* For canonical type comparisons we do not want to build SCCs |
1288 | so we cannot compare pointed-to types. But we can, for now, |
1289 | require the same pointed-to type kind and match what |
1290 | useless_type_conversion_p would do. */ |
1291 | if (POINTER_TYPE_P (t1)) |
1292 | { |
1293 | if (TYPE_ADDR_SPACE (TREE_TYPE (t1)) |
1294 | != TYPE_ADDR_SPACE (TREE_TYPE (t2))) |
1295 | { |
1296 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1297 | G_("it is defined as a pointer in different address " |
1298 | "space in another translation unit" )); |
1299 | return false; |
1300 | } |
1301 | |
1302 | if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), |
1303 | visited, loc1, loc2)) |
1304 | { |
1305 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1306 | G_("it is defined as a pointer to different type " |
1307 | "in another translation unit" )); |
1308 | if (warn && *warned) |
1309 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), |
1310 | loc1, loc2); |
1311 | return false; |
1312 | } |
1313 | } |
1314 | |
1315 | if ((VECTOR_TYPE_P (t1) || TREE_CODE (t1) == COMPLEX_TYPE) |
1316 | && !odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), |
1317 | visited, loc1, loc2)) |
1318 | { |
1319 | /* Probably specific enough. */ |
1320 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1321 | G_("a different type is defined " |
1322 | "in another translation unit" )); |
1323 | if (warn && *warned) |
1324 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2); |
1325 | return false; |
1326 | } |
1327 | } |
1328 | /* Do type-specific comparisons. */ |
1329 | else switch (TREE_CODE (t1)) |
1330 | { |
1331 | case ARRAY_TYPE: |
1332 | { |
1333 | /* Array types are the same if the element types are the same and |
1334 | the number of elements are the same. */ |
1335 | if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), |
1336 | visited, loc1, loc2)) |
1337 | { |
1338 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1339 | G_("a different type is defined in another " |
1340 | "translation unit" )); |
1341 | if (warn && *warned) |
1342 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2); |
1343 | } |
1344 | gcc_assert (TYPE_STRING_FLAG (t1) == TYPE_STRING_FLAG (t2)); |
1345 | gcc_assert (TYPE_NONALIASED_COMPONENT (t1) |
1346 | == TYPE_NONALIASED_COMPONENT (t2)); |
1347 | |
1348 | tree i1 = TYPE_DOMAIN (t1); |
1349 | tree i2 = TYPE_DOMAIN (t2); |
1350 | |
1351 | /* For an incomplete external array, the type domain can be |
1352 | NULL_TREE. Check this condition also. */ |
1353 | if (i1 == NULL_TREE || i2 == NULL_TREE) |
1354 | return type_variants_equivalent_p (t1, t2); |
1355 | |
1356 | tree min1 = TYPE_MIN_VALUE (i1); |
1357 | tree min2 = TYPE_MIN_VALUE (i2); |
1358 | tree max1 = TYPE_MAX_VALUE (i1); |
1359 | tree max2 = TYPE_MAX_VALUE (i2); |
1360 | |
1361 | /* In C++, minimums should be always 0. */ |
1362 | gcc_assert (min1 == min2); |
1363 | if (!operand_equal_p (max1, max2, flags: 0)) |
1364 | { |
1365 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1366 | G_("an array of different size is defined " |
1367 | "in another translation unit" )); |
1368 | return false; |
1369 | } |
1370 | } |
1371 | break; |
1372 | |
1373 | case METHOD_TYPE: |
1374 | case FUNCTION_TYPE: |
1375 | /* Function types are the same if the return type and arguments types |
1376 | are the same. */ |
1377 | if (!odr_subtypes_equivalent_p (TREE_TYPE (t1), TREE_TYPE (t2), |
1378 | visited, loc1, loc2)) |
1379 | { |
1380 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1381 | G_("has different return value " |
1382 | "in another translation unit" )); |
1383 | if (warn && *warned) |
1384 | warn_types_mismatch (TREE_TYPE (t1), TREE_TYPE (t2), loc1, loc2); |
1385 | return false; |
1386 | } |
1387 | |
1388 | if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2) |
1389 | || !prototype_p (t1) || !prototype_p (t2)) |
1390 | return type_variants_equivalent_p (t1, t2); |
1391 | else |
1392 | { |
1393 | tree parms1, parms2; |
1394 | |
1395 | for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2); |
1396 | parms1 && parms2; |
1397 | parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2)) |
1398 | { |
1399 | if (!odr_subtypes_equivalent_p |
1400 | (TREE_VALUE (parms1), TREE_VALUE (parms2), |
1401 | visited, loc1, loc2)) |
1402 | { |
1403 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1404 | G_("has different parameters in another " |
1405 | "translation unit" )); |
1406 | if (warn && *warned) |
1407 | warn_types_mismatch (TREE_VALUE (parms1), |
1408 | TREE_VALUE (parms2), loc1, loc2); |
1409 | return false; |
1410 | } |
1411 | } |
1412 | |
1413 | if (parms1 || parms2) |
1414 | { |
1415 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1416 | G_("has different parameters " |
1417 | "in another translation unit" )); |
1418 | return false; |
1419 | } |
1420 | |
1421 | return type_variants_equivalent_p (t1, t2); |
1422 | } |
1423 | |
1424 | case RECORD_TYPE: |
1425 | case UNION_TYPE: |
1426 | case QUAL_UNION_TYPE: |
1427 | { |
1428 | tree f1, f2; |
1429 | |
1430 | /* For aggregate types, all the fields must be the same. */ |
1431 | if (COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2)) |
1432 | { |
1433 | if (TYPE_BINFO (t1) && TYPE_BINFO (t2) |
1434 | && polymorphic_type_binfo_p (TYPE_BINFO (t1)) |
1435 | != polymorphic_type_binfo_p (TYPE_BINFO (t2))) |
1436 | { |
1437 | if (polymorphic_type_binfo_p (TYPE_BINFO (t1))) |
1438 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1439 | G_("a type defined in another translation unit " |
1440 | "is not polymorphic" )); |
1441 | else |
1442 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1443 | G_("a type defined in another translation unit " |
1444 | "is polymorphic" )); |
1445 | return false; |
1446 | } |
1447 | for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2); |
1448 | f1 || f2; |
1449 | f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2)) |
1450 | { |
1451 | /* Skip non-fields. */ |
1452 | while (f1 && skip_in_fields_list_p (t: f1)) |
1453 | f1 = TREE_CHAIN (f1); |
1454 | while (f2 && skip_in_fields_list_p (t: f2)) |
1455 | f2 = TREE_CHAIN (f2); |
1456 | if (!f1 || !f2) |
1457 | break; |
1458 | if (DECL_VIRTUAL_P (f1) != DECL_VIRTUAL_P (f2)) |
1459 | { |
1460 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1461 | G_("a type with different virtual table pointers" |
1462 | " is defined in another translation unit" )); |
1463 | return false; |
1464 | } |
1465 | if (DECL_ARTIFICIAL (f1) != DECL_ARTIFICIAL (f2)) |
1466 | { |
1467 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1468 | G_("a type with different bases is defined " |
1469 | "in another translation unit" )); |
1470 | return false; |
1471 | } |
1472 | if (DECL_NAME (f1) != DECL_NAME (f2) |
1473 | && !DECL_ARTIFICIAL (f1)) |
1474 | { |
1475 | warn_odr (t1, t2, st1: f1, st2: f2, warn, warned, |
1476 | G_("a field with different name is defined " |
1477 | "in another translation unit" )); |
1478 | return false; |
1479 | } |
1480 | if (!odr_subtypes_equivalent_p (TREE_TYPE (f1), |
1481 | TREE_TYPE (f2), |
1482 | visited, loc1, loc2)) |
1483 | { |
1484 | /* Do not warn about artificial fields and just go into |
1485 | generic field mismatch warning. */ |
1486 | if (DECL_ARTIFICIAL (f1)) |
1487 | break; |
1488 | |
1489 | warn_odr (t1, t2, st1: f1, st2: f2, warn, warned, |
1490 | G_("a field of same name but different type " |
1491 | "is defined in another translation unit" )); |
1492 | if (warn && *warned) |
1493 | warn_types_mismatch (TREE_TYPE (f1), TREE_TYPE (f2), loc1, loc2); |
1494 | return false; |
1495 | } |
1496 | if (!gimple_compare_field_offset (f1, f2)) |
1497 | { |
1498 | /* Do not warn about artificial fields and just go into |
1499 | generic field mismatch warning. */ |
1500 | if (DECL_ARTIFICIAL (f1)) |
1501 | break; |
1502 | warn_odr (t1, t2, st1: f1, st2: f2, warn, warned, |
1503 | G_("fields have different layout " |
1504 | "in another translation unit" )); |
1505 | return false; |
1506 | } |
1507 | if (DECL_BIT_FIELD (f1) != DECL_BIT_FIELD (f2)) |
1508 | { |
1509 | warn_odr (t1, t2, st1: f1, st2: f2, warn, warned, |
1510 | G_("one field is a bitfield while the other " |
1511 | "is not" )); |
1512 | return false; |
1513 | } |
1514 | else |
1515 | gcc_assert (DECL_NONADDRESSABLE_P (f1) |
1516 | == DECL_NONADDRESSABLE_P (f2)); |
1517 | } |
1518 | |
1519 | /* If one aggregate has more fields than the other, they |
1520 | are not the same. */ |
1521 | if (f1 || f2) |
1522 | { |
1523 | if ((f1 && DECL_VIRTUAL_P (f1)) || (f2 && DECL_VIRTUAL_P (f2))) |
1524 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1525 | G_("a type with different virtual table pointers" |
1526 | " is defined in another translation unit" )); |
1527 | else if ((f1 && DECL_ARTIFICIAL (f1)) |
1528 | || (f2 && DECL_ARTIFICIAL (f2))) |
1529 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1530 | G_("a type with different bases is defined " |
1531 | "in another translation unit" )); |
1532 | else |
1533 | warn_odr (t1, t2, st1: f1, st2: f2, warn, warned, |
1534 | G_("a type with different number of fields " |
1535 | "is defined in another translation unit" )); |
1536 | |
1537 | return false; |
1538 | } |
1539 | } |
1540 | break; |
1541 | } |
1542 | case VOID_TYPE: |
1543 | case OPAQUE_TYPE: |
1544 | case NULLPTR_TYPE: |
1545 | break; |
1546 | |
1547 | default: |
1548 | debug_tree (t1); |
1549 | gcc_unreachable (); |
1550 | } |
1551 | |
1552 | /* Those are better to come last as they are utterly uninformative. */ |
1553 | if (TYPE_SIZE (t1) && TYPE_SIZE (t2) |
1554 | && !operand_equal_p (TYPE_SIZE (t1), TYPE_SIZE (t2), flags: 0)) |
1555 | { |
1556 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1557 | G_("a type with different size " |
1558 | "is defined in another translation unit" )); |
1559 | return false; |
1560 | } |
1561 | |
1562 | if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2) |
1563 | && COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2)) |
1564 | { |
1565 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1566 | G_("one type needs to be constructed while the other does not" )); |
1567 | gcc_checking_assert (RECORD_OR_UNION_TYPE_P (t1)); |
1568 | return false; |
1569 | } |
1570 | /* There is no really good user facing warning for this. |
1571 | Either the original reason for modes being different is lost during |
1572 | streaming or we should catch earlier warnings. We however must detect |
1573 | the mismatch to avoid type verifier from cmplaining on mismatched |
1574 | types between type and canonical type. See PR91576. */ |
1575 | if (TYPE_MODE (t1) != TYPE_MODE (t2) |
1576 | && COMPLETE_TYPE_P (t1) && COMPLETE_TYPE_P (t2)) |
1577 | { |
1578 | warn_odr (t1, t2, NULL, NULL, warn, warned, |
1579 | G_("memory layout mismatch" )); |
1580 | return false; |
1581 | } |
1582 | |
1583 | gcc_assert (!TYPE_SIZE_UNIT (t1) || !TYPE_SIZE_UNIT (t2) |
1584 | || operand_equal_p (TYPE_SIZE_UNIT (t1), |
1585 | TYPE_SIZE_UNIT (t2), 0)); |
1586 | return type_variants_equivalent_p (t1, t2); |
1587 | } |
1588 | |
1589 | /* Return true if TYPE1 and TYPE2 are equivalent for One Definition Rule. */ |
1590 | |
1591 | bool |
1592 | odr_types_equivalent_p (tree type1, tree type2) |
1593 | { |
1594 | gcc_checking_assert (odr_or_derived_type_p (type1) |
1595 | && odr_or_derived_type_p (type2)); |
1596 | |
1597 | hash_set<type_pair> visited; |
1598 | return odr_types_equivalent_p (t1: type1, t2: type2, warn: false, NULL, |
1599 | visited: &visited, UNKNOWN_LOCATION, UNKNOWN_LOCATION); |
1600 | } |
1601 | |
1602 | /* TYPE is equivalent to VAL by ODR, but its tree representation differs |
1603 | from VAL->type. This may happen in LTO where tree merging did not merge |
1604 | all variants of the same type or due to ODR violation. |
1605 | |
1606 | Analyze and report ODR violations and add type to duplicate list. |
1607 | If TYPE is more specified than VAL->type, prevail VAL->type. Also if |
1608 | this is first time we see definition of a class return true so the |
1609 | base types are analyzed. */ |
1610 | |
1611 | static bool |
1612 | add_type_duplicate (odr_type val, tree type) |
1613 | { |
1614 | bool build_bases = false; |
1615 | bool prevail = false; |
1616 | bool odr_must_violate = false; |
1617 | |
1618 | if (!val->types_set) |
1619 | val->types_set = new hash_set<tree>; |
1620 | |
1621 | /* Chose polymorphic type as leader (this happens only in case of ODR |
1622 | violations. */ |
1623 | if ((TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type) |
1624 | && polymorphic_type_binfo_p (TYPE_BINFO (type))) |
1625 | && (TREE_CODE (val->type) != RECORD_TYPE || !TYPE_BINFO (val->type) |
1626 | || !polymorphic_type_binfo_p (TYPE_BINFO (val->type)))) |
1627 | { |
1628 | prevail = true; |
1629 | build_bases = true; |
1630 | } |
1631 | /* Always prefer complete type to be the leader. */ |
1632 | else if (!COMPLETE_TYPE_P (val->type) && COMPLETE_TYPE_P (type)) |
1633 | { |
1634 | prevail = true; |
1635 | if (TREE_CODE (type) == RECORD_TYPE) |
1636 | build_bases = TYPE_BINFO (type); |
1637 | } |
1638 | else if (COMPLETE_TYPE_P (val->type) && !COMPLETE_TYPE_P (type)) |
1639 | ; |
1640 | else if (TREE_CODE (val->type) == RECORD_TYPE |
1641 | && TREE_CODE (type) == RECORD_TYPE |
1642 | && TYPE_BINFO (type) && !TYPE_BINFO (val->type)) |
1643 | { |
1644 | gcc_assert (!val->bases.length ()); |
1645 | build_bases = true; |
1646 | prevail = true; |
1647 | } |
1648 | |
1649 | if (prevail) |
1650 | std::swap (a&: val->type, b&: type); |
1651 | |
1652 | val->types_set->add (k: type); |
1653 | |
1654 | if (!odr_hash) |
1655 | return false; |
1656 | |
1657 | gcc_checking_assert (can_be_name_hashed_p (type) |
1658 | && can_be_name_hashed_p (val->type)); |
1659 | |
1660 | bool merge = true; |
1661 | bool base_mismatch = false; |
1662 | unsigned int i; |
1663 | bool warned = false; |
1664 | hash_set<type_pair> visited; |
1665 | |
1666 | gcc_assert (in_lto_p); |
1667 | vec_safe_push (v&: val->types, obj: type); |
1668 | |
1669 | /* If both are class types, compare the bases. */ |
1670 | if (COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type) |
1671 | && TREE_CODE (val->type) == RECORD_TYPE |
1672 | && TREE_CODE (type) == RECORD_TYPE |
1673 | && TYPE_BINFO (val->type) && TYPE_BINFO (type)) |
1674 | { |
1675 | if (BINFO_N_BASE_BINFOS (TYPE_BINFO (type)) |
1676 | != BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))) |
1677 | { |
1678 | if (!flag_ltrans && !warned && !val->odr_violated) |
1679 | { |
1680 | tree ; |
1681 | warn_odr (t1: type, t2: val->type, NULL, NULL, warn: !warned, warned: &warned, |
1682 | reason: "a type with the same name but different " |
1683 | "number of polymorphic bases is " |
1684 | "defined in another translation unit" ); |
1685 | if (warned) |
1686 | { |
1687 | if (BINFO_N_BASE_BINFOS (TYPE_BINFO (type)) |
1688 | > BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))) |
1689 | extra_base = BINFO_BASE_BINFO |
1690 | (TYPE_BINFO (type), |
1691 | BINFO_N_BASE_BINFOS (TYPE_BINFO (val->type))); |
1692 | else |
1693 | extra_base = BINFO_BASE_BINFO |
1694 | (TYPE_BINFO (val->type), |
1695 | BINFO_N_BASE_BINFOS (TYPE_BINFO (type))); |
1696 | tree = BINFO_TYPE (extra_base); |
1697 | inform (DECL_SOURCE_LOCATION (TYPE_NAME (extra_base_type)), |
1698 | "the extra base is defined here" ); |
1699 | } |
1700 | } |
1701 | base_mismatch = true; |
1702 | } |
1703 | else |
1704 | for (i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++) |
1705 | { |
1706 | tree base1 = BINFO_BASE_BINFO (TYPE_BINFO (type), i); |
1707 | tree base2 = BINFO_BASE_BINFO (TYPE_BINFO (val->type), i); |
1708 | tree type1 = BINFO_TYPE (base1); |
1709 | tree type2 = BINFO_TYPE (base2); |
1710 | |
1711 | if (types_odr_comparable (t1: type1, t2: type2)) |
1712 | { |
1713 | if (!types_same_for_odr (type1, type2)) |
1714 | base_mismatch = true; |
1715 | } |
1716 | else |
1717 | if (!odr_types_equivalent_p (type1, type2)) |
1718 | base_mismatch = true; |
1719 | if (base_mismatch) |
1720 | { |
1721 | if (!warned && !val->odr_violated) |
1722 | { |
1723 | warn_odr (t1: type, t2: val->type, NULL, NULL, |
1724 | warn: !warned, warned: &warned, |
1725 | reason: "a type with the same name but different base " |
1726 | "type is defined in another translation unit" ); |
1727 | if (warned) |
1728 | warn_types_mismatch (t1: type1, t2: type2, |
1729 | UNKNOWN_LOCATION, UNKNOWN_LOCATION); |
1730 | } |
1731 | break; |
1732 | } |
1733 | if (BINFO_OFFSET (base1) != BINFO_OFFSET (base2)) |
1734 | { |
1735 | base_mismatch = true; |
1736 | if (!warned && !val->odr_violated) |
1737 | warn_odr (t1: type, t2: val->type, NULL, NULL, |
1738 | warn: !warned, warned: &warned, |
1739 | reason: "a type with the same name but different base " |
1740 | "layout is defined in another translation unit" ); |
1741 | break; |
1742 | } |
1743 | /* One of bases is not of complete type. */ |
1744 | if (!TYPE_BINFO (type1) != !TYPE_BINFO (type2)) |
1745 | { |
1746 | /* If we have a polymorphic type info specified for TYPE1 |
1747 | but not for TYPE2 we possibly missed a base when recording |
1748 | VAL->type earlier. |
1749 | Be sure this does not happen. */ |
1750 | if (TYPE_BINFO (type1) |
1751 | && polymorphic_type_binfo_p (TYPE_BINFO (type1)) |
1752 | && !build_bases) |
1753 | odr_must_violate = true; |
1754 | break; |
1755 | } |
1756 | /* One base is polymorphic and the other not. |
1757 | This ought to be diagnosed earlier, but do not ICE in the |
1758 | checking bellow. */ |
1759 | else if (TYPE_BINFO (type1) |
1760 | && polymorphic_type_binfo_p (TYPE_BINFO (type1)) |
1761 | != polymorphic_type_binfo_p (TYPE_BINFO (type2))) |
1762 | { |
1763 | if (!warned && !val->odr_violated) |
1764 | warn_odr (t1: type, t2: val->type, NULL, NULL, |
1765 | warn: !warned, warned: &warned, |
1766 | reason: "a base of the type is polymorphic only in one " |
1767 | "translation unit" ); |
1768 | base_mismatch = true; |
1769 | break; |
1770 | } |
1771 | } |
1772 | if (base_mismatch) |
1773 | { |
1774 | merge = false; |
1775 | odr_violation_reported = true; |
1776 | val->odr_violated = true; |
1777 | |
1778 | if (symtab->dump_file) |
1779 | { |
1780 | fprintf (stream: symtab->dump_file, format: "ODR base violation\n" ); |
1781 | |
1782 | print_node (symtab->dump_file, "" , val->type, 0); |
1783 | putc (c: '\n',stream: symtab->dump_file); |
1784 | print_node (symtab->dump_file, "" , type, 0); |
1785 | putc (c: '\n',stream: symtab->dump_file); |
1786 | } |
1787 | } |
1788 | } |
1789 | |
1790 | /* Next compare memory layout. |
1791 | The DECL_SOURCE_LOCATIONs in this invocation came from LTO streaming. |
1792 | We must apply the location cache to ensure that they are valid |
1793 | before we can pass them to odr_types_equivalent_p (PR lto/83121). */ |
1794 | if (lto_location_cache::current_cache) |
1795 | lto_location_cache::current_cache->apply_location_cache (); |
1796 | /* As a special case we stream mangles names of integer types so we can see |
1797 | if they are believed to be same even though they have different |
1798 | representation. Avoid bogus warning on mismatches in these. */ |
1799 | if (TREE_CODE (type) != INTEGER_TYPE |
1800 | && TREE_CODE (val->type) != INTEGER_TYPE |
1801 | && !odr_types_equivalent_p (t1: val->type, t2: type, |
1802 | warn: !flag_ltrans && !val->odr_violated && !warned, |
1803 | warned: &warned, visited: &visited, |
1804 | DECL_SOURCE_LOCATION (TYPE_NAME (val->type)), |
1805 | DECL_SOURCE_LOCATION (TYPE_NAME (type)))) |
1806 | { |
1807 | merge = false; |
1808 | odr_violation_reported = true; |
1809 | val->odr_violated = true; |
1810 | } |
1811 | gcc_assert (val->odr_violated || !odr_must_violate); |
1812 | /* Sanity check that all bases will be build same way again. */ |
1813 | if (flag_checking |
1814 | && COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type) |
1815 | && TREE_CODE (val->type) == RECORD_TYPE |
1816 | && TREE_CODE (type) == RECORD_TYPE |
1817 | && TYPE_BINFO (val->type) && TYPE_BINFO (type) |
1818 | && !val->odr_violated |
1819 | && !base_mismatch && val->bases.length ()) |
1820 | { |
1821 | unsigned int num_poly_bases = 0; |
1822 | unsigned int j; |
1823 | |
1824 | for (i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++) |
1825 | if (polymorphic_type_binfo_p (BINFO_BASE_BINFO |
1826 | (TYPE_BINFO (type), i))) |
1827 | num_poly_bases++; |
1828 | gcc_assert (num_poly_bases == val->bases.length ()); |
1829 | for (j = 0, i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); |
1830 | i++) |
1831 | if (polymorphic_type_binfo_p (BINFO_BASE_BINFO |
1832 | (TYPE_BINFO (type), i))) |
1833 | { |
1834 | odr_type base = get_odr_type |
1835 | (BINFO_TYPE |
1836 | (BINFO_BASE_BINFO (TYPE_BINFO (type), |
1837 | i)), |
1838 | insert: true); |
1839 | gcc_assert (val->bases[j] == base); |
1840 | j++; |
1841 | } |
1842 | } |
1843 | |
1844 | |
1845 | /* Regularize things a little. During LTO same types may come with |
1846 | different BINFOs. Either because their virtual table was |
1847 | not merged by tree merging and only later at decl merging or |
1848 | because one type comes with external vtable, while other |
1849 | with internal. We want to merge equivalent binfos to conserve |
1850 | memory and streaming overhead. |
1851 | |
1852 | The external vtables are more harmful: they contain references |
1853 | to external declarations of methods that may be defined in the |
1854 | merged LTO unit. For this reason we absolutely need to remove |
1855 | them and replace by internal variants. Not doing so will lead |
1856 | to incomplete answers from possible_polymorphic_call_targets. |
1857 | |
1858 | FIXME: disable for now; because ODR types are now build during |
1859 | streaming in, the variants do not need to be linked to the type, |
1860 | yet. We need to do the merging in cleanup pass to be implemented |
1861 | soon. */ |
1862 | if (!flag_ltrans && merge |
1863 | && 0 |
1864 | && TREE_CODE (val->type) == RECORD_TYPE |
1865 | && TREE_CODE (type) == RECORD_TYPE |
1866 | && TYPE_BINFO (val->type) && TYPE_BINFO (type) |
1867 | && TYPE_MAIN_VARIANT (type) == type |
1868 | && TYPE_MAIN_VARIANT (val->type) == val->type |
1869 | && BINFO_VTABLE (TYPE_BINFO (val->type)) |
1870 | && BINFO_VTABLE (TYPE_BINFO (type))) |
1871 | { |
1872 | tree master_binfo = TYPE_BINFO (val->type); |
1873 | tree v1 = BINFO_VTABLE (master_binfo); |
1874 | tree v2 = BINFO_VTABLE (TYPE_BINFO (type)); |
1875 | |
1876 | if (TREE_CODE (v1) == POINTER_PLUS_EXPR) |
1877 | { |
1878 | gcc_assert (TREE_CODE (v2) == POINTER_PLUS_EXPR |
1879 | && operand_equal_p (TREE_OPERAND (v1, 1), |
1880 | TREE_OPERAND (v2, 1), 0)); |
1881 | v1 = TREE_OPERAND (TREE_OPERAND (v1, 0), 0); |
1882 | v2 = TREE_OPERAND (TREE_OPERAND (v2, 0), 0); |
1883 | } |
1884 | gcc_assert (DECL_ASSEMBLER_NAME (v1) |
1885 | == DECL_ASSEMBLER_NAME (v2)); |
1886 | |
1887 | if (DECL_EXTERNAL (v1) && !DECL_EXTERNAL (v2)) |
1888 | { |
1889 | unsigned int i; |
1890 | |
1891 | set_type_binfo (type: val->type, TYPE_BINFO (type)); |
1892 | for (i = 0; i < val->types->length (); i++) |
1893 | { |
1894 | if (TYPE_BINFO ((*val->types)[i]) |
1895 | == master_binfo) |
1896 | set_type_binfo (type: (*val->types)[i], TYPE_BINFO (type)); |
1897 | } |
1898 | BINFO_TYPE (TYPE_BINFO (type)) = val->type; |
1899 | } |
1900 | else |
1901 | set_type_binfo (type, binfo: master_binfo); |
1902 | } |
1903 | return build_bases; |
1904 | } |
1905 | |
/* REF is OBJ_TYPE_REF, return the class the ref corresponds to.
   FOR_DUMP_P is true when being called from the dump routines.  */

tree
obj_type_ref_class (const_tree ref, bool for_dump_p)
{
  gcc_checking_assert (TREE_CODE (ref) == OBJ_TYPE_REF);
  /* The expression type is a pointer to the callee type; strip it.  */
  ref = TREE_TYPE (ref);
  gcc_checking_assert (TREE_CODE (ref) == POINTER_TYPE);
  ref = TREE_TYPE (ref);
  /* We look for type THIS points to. ObjC also builds
     OBJ_TYPE_REF with non-method calls, Their first parameter
     ID however also corresponds to class type. */
  gcc_checking_assert (TREE_CODE (ref) == METHOD_TYPE
		       || TREE_CODE (ref) == FUNCTION_TYPE);
  /* First argument is the THIS (or ID) pointer; its pointed-to type
     is the class we are after.  */
  ref = TREE_VALUE (TYPE_ARG_TYPES (ref));
  gcc_checking_assert (TREE_CODE (ref) == POINTER_TYPE);
  tree ret = TREE_TYPE (ref);
  /* Outside of LTO canonical types identify ODR-equivalent classes;
     within LTO (or for structural-equality types) consult the ODR hash.
     When dumping, do not insert new entries into the hash.  */
  if (!in_lto_p && !TYPE_STRUCTURAL_EQUALITY_P (ret))
    ret = TYPE_CANONICAL (ret);
  else if (odr_type ot = get_odr_type (ret, insert: !for_dump_p))
    ret = ot->type;
  else
    gcc_assert (for_dump_p);
  return ret;
}
1932 | |
/* Get ODR type hash entry for TYPE.  If INSERT is true, create
   possibly new entry.  */

odr_type
get_odr_type (tree type, bool insert)
{
  odr_type_d **slot = NULL;
  odr_type val = NULL;
  hashval_t hash;
  bool build_bases = false;
  bool insert_to_odr_array = false;
  /* Largest id among TYPE's recorded bases; used to keep the invariant
     that a type appears after all its bases in the odr_types array.  */
  int base_id = -1;

  /* The ODR hash is keyed on main variants; outside of LTO canonical
     types may be used to merge structurally equal types cheaply.  */
  type = TYPE_MAIN_VARIANT (type);
  if (!in_lto_p && !TYPE_STRUCTURAL_EQUALITY_P (type))
    type = TYPE_CANONICAL (type);

  gcc_checking_assert (can_be_name_hashed_p (type));

  hash = hash_odr_name (t: type);
  slot = odr_hash->find_slot_with_hash (comparable: type, hash,
					insert: insert ? INSERT : NO_INSERT);

  /* With NO_INSERT an absent entry yields a NULL slot.  */
  if (!slot)
    return NULL;

  /* See if we already have entry for type.  */
  if (*slot)
    {
      val = *slot;

      /* TYPE is a duplicate of an already recorded ODR type; merge it in
	 (once per duplicate) and learn whether bases still must be built.  */
      if (val->type != type && insert
	  && (!val->types_set || !val->types_set->add (k: type)))
	build_bases = add_type_duplicate (val, type);
    }
  else
    {
      /* First time this ODR name is seen; allocate a fresh entry.  */
      val = ggc_cleared_alloc<odr_type_d> ();
      val->type = type;
      val->bases = vNULL;
      val->derived_types = vNULL;
      if (type_with_linkage_p (t: type))
	val->anonymous_namespace = type_in_anonymous_namespace_p (t: type);
      else
	val->anonymous_namespace = 0;
      /* Bases can only be recorded once the type is complete.  */
      build_bases = COMPLETE_TYPE_P (val->type);
      insert_to_odr_array = true;
      *slot = val;
    }

  /* Link the entry into the inheritance graph by walking its binfo.  */
  if (build_bases && TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type)
      && type_with_linkage_p (t: type)
      && type == TYPE_MAIN_VARIANT (type))
    {
      tree binfo = TYPE_BINFO (type);
      unsigned int i;

      gcc_assert (BINFO_TYPE (TYPE_BINFO (val->type)) == type);

      val->all_derivations_known = type_all_derivations_known_p (t: type);
      for (i = 0; i < BINFO_N_BASE_BINFOS (binfo); i++)
	/* For now record only polymorphic types.  Others are
	   pointless for devirtualization and we cannot precisely
	   determine ODR equivalency of these during LTO.  */
	if (polymorphic_type_binfo_p (BINFO_BASE_BINFO (binfo, i)))
	  {
	    tree base_type= BINFO_TYPE (BINFO_BASE_BINFO (binfo, i));
	    odr_type base = get_odr_type (type: base_type, insert: true);
	    gcc_assert (TYPE_MAIN_VARIANT (base_type) == base_type);
	    base->derived_types.safe_push (obj: val);
	    val->bases.safe_push (obj: base);
	    if (base->id > base_id)
	      base_id = base->id;
	  }
    }
  /* Ensure that type always appears after bases.  */
  if (insert_to_odr_array)
    {
      if (odr_types_ptr)
	val->id = odr_types.length ();
      vec_safe_push (v&: odr_types_ptr, obj: val);
    }
  else if (base_id > val->id)
    {
      /* Recursively recording bases above may have assigned them higher
	 ids; renumber VAL by moving it to the end of the array.  */
      odr_types[val->id] = 0;
      /* Be sure we did not recorded any derived types; these may need
	 renumbering too.  */
      gcc_assert (val->derived_types.length() == 0);
      val->id = odr_types.length ();
      vec_safe_push (v&: odr_types_ptr, obj: val);
    }
  return val;
}
2026 | |
2027 | /* Return type that in ODR type hash prevailed TYPE. Be careful and punt |
2028 | on ODR violations. */ |
2029 | |
2030 | tree |
2031 | prevailing_odr_type (tree type) |
2032 | { |
2033 | odr_type t = get_odr_type (type, insert: false); |
2034 | if (!t || t->odr_violated) |
2035 | return type; |
2036 | return t->type; |
2037 | } |
2038 | |
2039 | /* Set tbaa_enabled flag for TYPE. */ |
2040 | |
2041 | void |
2042 | enable_odr_based_tbaa (tree type) |
2043 | { |
2044 | odr_type t = get_odr_type (type, insert: true); |
2045 | t->tbaa_enabled = true; |
2046 | } |
2047 | |
2048 | /* True if canonical type of TYPE is determined using ODR name. */ |
2049 | |
2050 | bool |
2051 | odr_based_tbaa_p (const_tree type) |
2052 | { |
2053 | if (!RECORD_OR_UNION_TYPE_P (type)) |
2054 | return false; |
2055 | if (!odr_hash) |
2056 | return false; |
2057 | odr_type t = get_odr_type (type: const_cast <tree> (type), insert: false); |
2058 | if (!t || !t->tbaa_enabled) |
2059 | return false; |
2060 | return true; |
2061 | } |
2062 | |
2063 | /* Set TYPE_CANONICAL of type and all its variants and duplicates |
2064 | to CANONICAL. */ |
2065 | |
2066 | void |
2067 | set_type_canonical_for_odr_type (tree type, tree canonical) |
2068 | { |
2069 | odr_type t = get_odr_type (type, insert: false); |
2070 | unsigned int i; |
2071 | tree tt; |
2072 | |
2073 | for (tree t2 = t->type; t2; t2 = TYPE_NEXT_VARIANT (t2)) |
2074 | TYPE_CANONICAL (t2) = canonical; |
2075 | if (t->types) |
2076 | FOR_EACH_VEC_ELT (*t->types, i, tt) |
2077 | for (tree t2 = tt; t2; t2 = TYPE_NEXT_VARIANT (t2)) |
2078 | TYPE_CANONICAL (t2) = canonical; |
2079 | } |
2080 | |
/* Return true if we reported some ODR violation on TYPE.  */

bool
odr_type_violation_reported_p (tree type)
{
  /* NOTE(review): get_odr_type with insert=false returns NULL for types
     not present in the ODR hash; this dereferences the result without a
     check, so callers presumably guarantee TYPE is registered -- confirm.  */
  return get_odr_type (type, insert: false)->odr_violated;
}
2088 | |
/* Add TYPE of ODR type hash.  */

void
register_odr_type (tree type)
{
  if (!odr_hash)
    odr_hash = new odr_hash_type (23);
  /* Only main variants are keyed in the ODR hash.  */
  if (type == TYPE_MAIN_VARIANT (type))
    {
      /* To get ODR warnings right, first register all sub-types.  */
      if (RECORD_OR_UNION_TYPE_P (type)
	  && COMPLETE_TYPE_P (type))
	{
	  /* Limit recursion on types which are already registered.  */
	  odr_type ot = get_odr_type (type, insert: false);
	  if (ot
	      && (ot->type == type
		  || (ot->types_set
		      && ot->types_set->contains (k: type))))
	    return;
	  /* Register the types of all fields, stripping array dimensions
	     so members that are arrays of classes are handled too.  */
	  for (tree f = TYPE_FIELDS (type); f; f = TREE_CHAIN (f))
	    if (TREE_CODE (f) == FIELD_DECL)
	      {
		tree subtype = TREE_TYPE (f);

		while (TREE_CODE (subtype) == ARRAY_TYPE)
		  subtype = TREE_TYPE (subtype);
		if (type_with_linkage_p (TYPE_MAIN_VARIANT (subtype)))
		  register_odr_type (TYPE_MAIN_VARIANT (subtype));
	      }
	  /* Likewise register all base types.  */
	  if (TYPE_BINFO (type))
	    for (unsigned int i = 0;
		 i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++)
	      register_odr_type (BINFO_TYPE (BINFO_BASE_BINFO
					     (TYPE_BINFO (type), i)));
	}
      /* Finally register TYPE itself.  */
      get_odr_type (type, insert: true);
    }
}
2128 | |
2129 | /* Return true if type is known to have no derivations. */ |
2130 | |
2131 | bool |
2132 | type_known_to_have_no_derivations_p (tree t) |
2133 | { |
2134 | return (type_all_derivations_known_p (t) |
2135 | && (TYPE_FINAL_P (t) |
2136 | || (odr_hash |
2137 | && !get_odr_type (type: t, insert: true)->derived_types.length()))); |
2138 | } |
2139 | |
2140 | /* Dump ODR type T and all its derived types. INDENT specifies indentation for |
2141 | recursive printing. */ |
2142 | |
2143 | static void |
2144 | dump_odr_type (FILE *f, odr_type t, int indent=0) |
2145 | { |
2146 | unsigned int i; |
2147 | fprintf (stream: f, format: "%*s type %i: " , indent * 2, "" , t->id); |
2148 | print_generic_expr (f, t->type, TDF_SLIM); |
2149 | fprintf (stream: f, format: "%s" , t->anonymous_namespace ? " (anonymous namespace)" :"" ); |
2150 | fprintf (stream: f, format: "%s\n" , t->all_derivations_known ? " (derivations known)" :"" ); |
2151 | if (TYPE_NAME (t->type)) |
2152 | { |
2153 | if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t->type))) |
2154 | fprintf (stream: f, format: "%*s mangled name: %s\n" , indent * 2, "" , |
2155 | IDENTIFIER_POINTER |
2156 | (DECL_ASSEMBLER_NAME (TYPE_NAME (t->type)))); |
2157 | } |
2158 | if (t->bases.length ()) |
2159 | { |
2160 | fprintf (stream: f, format: "%*s base odr type ids: " , indent * 2, "" ); |
2161 | for (i = 0; i < t->bases.length (); i++) |
2162 | fprintf (stream: f, format: " %i" , t->bases[i]->id); |
2163 | fprintf (stream: f, format: "\n" ); |
2164 | } |
2165 | if (t->derived_types.length ()) |
2166 | { |
2167 | fprintf (stream: f, format: "%*s derived types:\n" , indent * 2, "" ); |
2168 | for (i = 0; i < t->derived_types.length (); i++) |
2169 | dump_odr_type (f, t: t->derived_types[i], indent: indent + 1); |
2170 | } |
2171 | fprintf (stream: f, format: "\n" ); |
2172 | } |
2173 | |
/* Dump the type inheritance graph.  */

static void
dump_type_inheritance_graph (FILE *f)
{
  unsigned int i;
  unsigned int num_all_types = 0, num_types = 0, num_duplicates = 0;
  if (!odr_types_ptr)
    return;
  fprintf (stream: f, format: "\n\nType inheritance graph:\n" );
  /* Dump from roots (types with no recorded bases); derived types are
     printed recursively by dump_odr_type.  */
  for (i = 0; i < odr_types.length (); i++)
    {
      if (odr_types[i] && odr_types[i]->bases.length () == 0)
	dump_odr_type (f, odr_types[i]);
    }
  /* Second pass: report duplicated tree representations and gather
     statistics.  */
  for (i = 0; i < odr_types.length (); i++)
    {
      /* Entries may be NULL after renumbering in get_odr_type.  */
      if (!odr_types[i])
	continue;

      num_all_types++;
      if (!odr_types[i]->types || !odr_types[i]->types->length ())
	continue;

      /* To aid ODR warnings we also mangle integer constants but do
	 not consider duplicates there.  */
      if (TREE_CODE (odr_types[i]->type) == INTEGER_TYPE)
	continue;

      /* It is normal to have one duplicate and one normal variant.  */
      if (odr_types[i]->types->length () == 1
	  && COMPLETE_TYPE_P (odr_types[i]->type)
	  && !COMPLETE_TYPE_P ((*odr_types[i]->types)[0]))
	continue;

      num_types ++;

      unsigned int j;
      fprintf (stream: f, format: "Duplicate tree types for odr type %i\n" , i);
      print_node (f, "" , odr_types[i]->type, 0);
      print_node (f, "" , TYPE_NAME (odr_types[i]->type), 0);
      putc (c: '\n',stream: f);
      for (j = 0; j < odr_types[i]->types->length (); j++)
	{
	  tree t;
	  num_duplicates ++;
	  fprintf (stream: f, format: "duplicate #%i\n" , j);
	  print_node (f, "" , (*odr_types[i]->types)[j], 0);
	  t = (*odr_types[i]->types)[j];
	  /* Also print the enclosing type contexts so the duplicate is
	     easier to identify.  */
	  while (TYPE_P (t) && TYPE_CONTEXT (t))
	    {
	      t = TYPE_CONTEXT (t);
	      print_node (f, "" , t, 0);
	    }
	  print_node (f, "" , TYPE_NAME ((*odr_types[i]->types)[j]), 0);
	  putc (c: '\n',stream: f);
	}
    }
  fprintf (stream: f, format: "Out of %i types there are %i types with duplicates; "
	   "%i duplicates overall\n" , num_all_types, num_types, num_duplicates);
}
2235 | |
2236 | /* Save some WPA->ltrans streaming by freeing stuff needed only for good |
2237 | ODR warnings. |
2238 | We make TYPE_DECLs to not point back |
2239 | to the type (which is needed to keep them in the same SCC and preserve |
2240 | location information to output warnings) and subsequently we make all |
2241 | TYPE_DECLS of same assembler name equivalent. */ |
2242 | |
2243 | static void |
2244 | free_odr_warning_data () |
2245 | { |
2246 | static bool odr_data_freed = false; |
2247 | |
2248 | if (odr_data_freed || !flag_wpa || !odr_types_ptr) |
2249 | return; |
2250 | |
2251 | odr_data_freed = true; |
2252 | |
2253 | for (unsigned int i = 0; i < odr_types.length (); i++) |
2254 | if (odr_types[i]) |
2255 | { |
2256 | tree t = odr_types[i]->type; |
2257 | |
2258 | TREE_TYPE (TYPE_NAME (t)) = void_type_node; |
2259 | |
2260 | if (odr_types[i]->types) |
2261 | for (unsigned int j = 0; j < odr_types[i]->types->length (); j++) |
2262 | { |
2263 | tree td = (*odr_types[i]->types)[j]; |
2264 | |
2265 | TYPE_NAME (td) = TYPE_NAME (t); |
2266 | } |
2267 | } |
2268 | odr_data_freed = true; |
2269 | } |
2270 | |
/* Initialize IPA devirt and build inheritance tree graph.  */

void
build_type_inheritance_graph (void)
{
  struct symtab_node *n;
  FILE *inheritance_dump_file;
  dump_flags_t flags;

  /* The graph is built at most once; on later calls only release the
     warning-only data.  */
  if (odr_hash)
    {
      free_odr_warning_data ();
      return;
    }
  timevar_push (tv: TV_IPA_INHERITANCE);
  inheritance_dump_file = dump_begin (TDI_inheritance, &flags);
  odr_hash = new odr_hash_type (23);

  /* We reconstruct the graph starting of types of all methods seen in the
     unit.  */
  FOR_EACH_SYMBOL (n)
    if (is_a <cgraph_node *> (p: n)
	&& DECL_VIRTUAL_P (n->decl)
	&& n->real_symbol_p ())
      get_odr_type (TYPE_METHOD_BASETYPE (TREE_TYPE (n->decl)), insert: true);

    /* Look also for virtual tables of types that do not define any methods.

       We need it in a case where class B has virtual base of class A
       re-defining its virtual method and there is class C with no virtual
       methods with B as virtual base.

       Here we output B's virtual method in two variant - for non-virtual
       and virtual inheritance.  B's virtual table has non-virtual version,
       while C's has virtual.

       For this reason we need to know about C in order to include both
       variants of B.  More correctly, record_target_from_binfo should
       add both variants of the method when walking B, but we have no
       link in between them.

       We rely on fact that either the method is exported and thus we
       assume it is called externally or C is in anonymous namespace and
       thus we will see the vtable.  */

    else if (is_a <varpool_node *> (p: n)
	     && DECL_VIRTUAL_P (n->decl)
	     && TREE_CODE (DECL_CONTEXT (n->decl)) == RECORD_TYPE
	     && TYPE_BINFO (DECL_CONTEXT (n->decl))
	     && polymorphic_type_binfo_p (TYPE_BINFO (DECL_CONTEXT (n->decl))))
      get_odr_type (TYPE_MAIN_VARIANT (DECL_CONTEXT (n->decl)), insert: true);
  if (inheritance_dump_file)
    {
      dump_type_inheritance_graph (f: inheritance_dump_file);
      dump_end (TDI_inheritance, inheritance_dump_file);
    }
  free_odr_warning_data ();
  timevar_pop (tv: TV_IPA_INHERITANCE);
}
2330 | |
/* Return true if N has reference from live virtual table
   (and thus can be a destination of polymorphic call).
   Be conservatively correct when callgraph is not built or
   if the method may be referred externally.  */

static bool
referenced_from_vtable_p (struct cgraph_node *node)
{
  int i;
  struct ipa_ref *ref;
  bool found = false;

  /* Externally visible methods may be referenced from vtables we never
     see; conservatively assume the reference exists.  */
  if (node->externally_visible
      || DECL_EXTERNAL (node->decl)
      || node->used_from_other_partition)
    return true;

  /* Keep this test constant time.
     It is unlikely this can happen except for the case where speculative
     devirtualization introduced many speculative edges to this node.
     In this case the target is very likely alive anyway.  */
  if (node->ref_list.referring.length () > 100)
    return true;

  /* We need references built.  */
  if (symtab->state <= CONSTRUCTION)
    return true;

  /* Look for an address reference taken from a virtual table variable,
     looking through aliases recursively.  (An IPA_REF_ALIAS reference to
     a function is presumably always from a cgraph_node, so the dyn_cast
     result is expected to be non-NULL -- matches upstream behavior.)  */
  for (i = 0; node->iterate_referring (i, ref); i++)
    if ((ref->use == IPA_REF_ALIAS
	 && referenced_from_vtable_p (node: dyn_cast<cgraph_node *> (p: ref->referring)))
	|| (ref->use == IPA_REF_ADDR
	    && VAR_P (ref->referring->decl)
	    && DECL_VIRTUAL_P (ref->referring->decl)))
      {
	found = true;
	break;
      }
  return found;
}
2371 | |
2372 | /* Return if TARGET is cxa_pure_virtual. */ |
2373 | |
2374 | static bool |
2375 | is_cxa_pure_virtual_p (tree target) |
2376 | { |
2377 | return target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE |
2378 | && DECL_NAME (target) |
2379 | && id_equal (DECL_NAME (target), |
2380 | str: "__cxa_pure_virtual" ); |
2381 | } |
2382 | |
/* If TARGET has associated node, record it in the NODES array.
   CAN_REFER specify if program can refer to the target directly.
   if TARGET is unknown (NULL) or it cannot be inserted (for example because
   its body was already removed and there is no way to refer to it), clear
   COMPLETEP.  */

static void
maybe_record_node (vec <cgraph_node *> &nodes,
		   tree target, hash_set<tree> *inserted,
		   bool can_refer,
		   bool *completep)
{
  struct cgraph_node *target_node, *alias_target;
  enum availability avail;
  bool pure_virtual = is_cxa_pure_virtual_p (target);

  /* __builtin_unreachable do not need to be added into
     list of targets; the runtime effect of calling them is undefined.
     Only "real" virtual methods should be accounted.  */
  if (target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE && !pure_virtual)
    return;

  if (!can_refer)
    {
      /* The only case when method of anonymous namespace becomes unreferable
	 is when we completely optimized it out.  */
      if (flag_ltrans
	  || !target
	  || !type_in_anonymous_namespace_p (DECL_CONTEXT (target)))
	*completep = false;
      return;
    }

  /* Unknown target: nothing to record; list completeness was handled
     above via CAN_REFER.  */
  if (!target)
    return;

  target_node = cgraph_node::get (decl: target);

  /* Prefer alias target over aliases, so we do not get confused by
     fake duplicates.  */
  if (target_node)
    {
      alias_target = target_node->ultimate_alias_target (availability: &avail);
      if (target_node != alias_target
	  && avail >= AVAIL_AVAILABLE
	  && target_node->get_availability ())
	target_node = alias_target;
    }

  /* Method can only be called by polymorphic call if any
     of vtables referring to it are alive.

     While this holds for non-anonymous functions, too, there are
     cases where we want to keep them in the list; for example
     inline functions with -fno-weak are static, but we still
     may devirtualize them when instance comes from other unit.
     The same holds for LTO.

     Currently we ignore these functions in speculative devirtualization.
     ??? Maybe it would make sense to be more aggressive for LTO even
     elsewhere.  */
  if (!flag_ltrans
      && !pure_virtual
      && type_in_anonymous_namespace_p (DECL_CONTEXT (target))
      && (!target_node
	  || !referenced_from_vtable_p (node: target_node)))
    ;
  /* See if TARGET is useful function we can deal with.  */
  else if (target_node != NULL
	   && (TREE_PUBLIC (target)
	       || DECL_EXTERNAL (target)
	       || target_node->definition)
	   && target_node->real_symbol_p ())
    {
      gcc_assert (!target_node->inlined_to);
      gcc_assert (target_node->real_symbol_p ());
      /* When sanitizing, do not assume that __cxa_pure_virtual is not called
	 by valid program.  */
      if (flag_sanitize & SANITIZE_UNREACHABLE)
	;
      /* Only add pure virtual if it is the only possible target.  This way
	 we will preserve the diagnostics about pure virtual called in many
	 cases without disabling optimization in other.  */
      else if (pure_virtual)
	{
	  if (nodes.length ())
	    return;
	}
      /* If we found a real target, take away cxa_pure_virtual.  */
      else if (!pure_virtual && nodes.length () == 1
	       && is_cxa_pure_virtual_p (target: nodes[0]->decl))
	nodes.pop ();
      if (pure_virtual && nodes.length ())
	return;
      /* Record the node once; INSERTED de-duplicates across calls.  */
      if (!inserted->add (k: target))
	{
	  cached_polymorphic_call_targets->add (k: target_node);
	  nodes.safe_push (obj: target_node);
	}
    }
  else if (!completep)
    ;
  /* We have definition of __cxa_pure_virtual that is not accessible (it is
     optimized out or partitioned to other unit) so we cannot add it.  When
     not sanitizing, there is nothing to do.
     Otherwise declare the list incomplete.  */
  else if (pure_virtual)
    {
      if (flag_sanitize & SANITIZE_UNREACHABLE)
	*completep = false;
    }
  else if (flag_ltrans
	   || !type_in_anonymous_namespace_p (DECL_CONTEXT (target)))
    *completep = false;
}
2498 | |
/* See if BINFO's type matches OUTER_TYPE.  If so, look up
   BINFO of subtype of OTR_TYPE at OFFSET and in that BINFO find
   method in vtable and insert method to NODES array
   or BASES_TO_CONSIDER if this array is non-NULL.
   Otherwise recurse to base BINFOs.
   This matches what get_binfo_at_offset does, but with offset
   being unknown.

   TYPE_BINFOS is a stack of BINFOS of types with defined
   virtual table seen on way from class type to BINFO.

   MATCHED_VTABLES tracks virtual tables we already did lookup
   for virtual function in. INSERTED tracks nodes we already
   inserted.

   ANONYMOUS is true if BINFO is part of anonymous namespace.

   Clear COMPLETEP when we hit unreferable target.
  */

static void
record_target_from_binfo (vec <cgraph_node *> &nodes,
			  vec <tree> *bases_to_consider,
			  tree binfo,
			  tree otr_type,
			  vec <tree> &type_binfos,
			  HOST_WIDE_INT otr_token,
			  tree outer_type,
			  HOST_WIDE_INT offset,
			  hash_set<tree> *inserted,
			  hash_set<tree> *matched_vtables,
			  bool anonymous,
			  bool *completep)
{
  tree type = BINFO_TYPE (binfo);
  int i;
  tree base_binfo;


  /* Maintain the stack of binfos with virtual tables on the path from the
     class type down to BINFO; popped again before every return below and
     after walking the bases.  */
  if (BINFO_VTABLE (binfo))
    type_binfos.safe_push (obj: binfo);
  if (types_same_for_odr (type1: type, type2: outer_type))
    {
      int i;
      tree type_binfo = NULL;

      /* Look up BINFO with virtual table.  For normal types it is always last
	 binfo on stack.  */
      for (i = type_binfos.length () - 1; i >= 0; i--)
	if (BINFO_OFFSET (type_binfos[i]) == BINFO_OFFSET (binfo))
	  {
	    type_binfo = type_binfos[i];
	    break;
	  }
      if (BINFO_VTABLE (binfo))
	type_binfos.pop ();
      /* If this is duplicated BINFO for base shared by virtual inheritance,
	 we may not have its associated vtable.  This is not a problem, since
	 we will walk it on the other path.  */
      if (!type_binfo)
	return;
      tree inner_binfo = get_binfo_at_offset (type_binfo,
					      offset, otr_type);
      if (!inner_binfo)
	{
	  /* The lookup may legitimately fail only for ODR violating
	     programs.  */
	  gcc_assert (odr_violation_reported);
	  return;
	}
      /* For types in anonymous namespace first check if the respective vtable
	 is alive.  If not, we know the type can't be called.  */
      if (!flag_ltrans && anonymous)
	{
	  tree vtable = BINFO_VTABLE (inner_binfo);
	  varpool_node *vnode;

	  if (TREE_CODE (vtable) == POINTER_PLUS_EXPR)
	    vtable = TREE_OPERAND (TREE_OPERAND (vtable, 0), 0);
	  vnode = varpool_node::get (decl: vtable);
	  if (!vnode || !vnode->definition)
	    return;
	}
      gcc_assert (inner_binfo);
      /* Do the vtable lookup at most once per vtable; when only collecting
	 bases for construction vtables, test membership without marking.  */
      if (bases_to_consider
	  ? !matched_vtables->contains (BINFO_VTABLE (inner_binfo))
	  : !matched_vtables->add (BINFO_VTABLE (inner_binfo)))
	{
	  bool can_refer;
	  tree target = gimple_get_virt_method_for_binfo (otr_token,
							  inner_binfo,
							  can_refer: &can_refer);
	  if (!bases_to_consider)
	    maybe_record_node (nodes, target, inserted, can_refer, completep);
	  /* Destructors are never called via construction vtables.  */
	  else if (!target || !DECL_CXX_DESTRUCTOR_P (target))
	    bases_to_consider->safe_push (obj: target);
	}
      return;
    }

  /* Walk bases.  */
  for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
    /* Walking bases that have no virtual method is pointless exercise.  */
    if (polymorphic_type_binfo_p (binfo: base_binfo))
      record_target_from_binfo (nodes, bases_to_consider, binfo: base_binfo, otr_type,
				type_binfos,
				otr_token, outer_type, offset, inserted,
				matched_vtables, anonymous, completep);
  if (BINFO_VTABLE (binfo))
    type_binfos.pop ();
}
2609 | |
2610 | /* Look up virtual methods matching OTR_TYPE (with OFFSET and OTR_TOKEN) |
2611 | of TYPE, insert them to NODES, recurse into derived nodes. |
2612 | INSERTED is used to avoid duplicate insertions of methods into NODES. |
2613 | MATCHED_VTABLES are used to avoid duplicate walking vtables. |
2614 | Clear COMPLETEP if unreferable target is found. |
2615 | |
2616 | If CONSIDER_CONSTRUCTION is true, record to BASES_TO_CONSIDER |
2617 | all cases where BASE_SKIPPED is true (because the base is abstract |
2618 | class). */ |
2619 | |
2620 | static void |
2621 | possible_polymorphic_call_targets_1 (vec <cgraph_node *> &nodes, |
2622 | hash_set<tree> *inserted, |
2623 | hash_set<tree> *matched_vtables, |
2624 | tree otr_type, |
2625 | odr_type type, |
2626 | HOST_WIDE_INT otr_token, |
2627 | tree outer_type, |
2628 | HOST_WIDE_INT offset, |
2629 | bool *completep, |
2630 | vec <tree> &bases_to_consider, |
2631 | bool consider_construction) |
2632 | { |
2633 | tree binfo = TYPE_BINFO (type->type); |
2634 | unsigned int i; |
2635 | auto_vec <tree, 8> type_binfos; |
2636 | bool possibly_instantiated = type_possibly_instantiated_p (t: type->type); |
2637 | |
2638 | /* We may need to consider types w/o instances because of possible derived |
2639 | types using their methods either directly or via construction vtables. |
2640 | We are safe to skip them when all derivations are known, since we will |
2641 | handle them later. |
2642 | This is done by recording them to BASES_TO_CONSIDER array. */ |
2643 | if (possibly_instantiated || consider_construction) |
2644 | { |
2645 | record_target_from_binfo (nodes, |
2646 | bases_to_consider: (!possibly_instantiated |
2647 | && type_all_derivations_known_p (t: type->type)) |
2648 | ? &bases_to_consider : NULL, |
2649 | binfo, otr_type, type_binfos, otr_token, |
2650 | outer_type, offset, |
2651 | inserted, matched_vtables, |
2652 | anonymous: type->anonymous_namespace, completep); |
2653 | } |
2654 | for (i = 0; i < type->derived_types.length (); i++) |
2655 | possible_polymorphic_call_targets_1 (nodes, inserted, |
2656 | matched_vtables, |
2657 | otr_type, |
2658 | type: type->derived_types[i], |
2659 | otr_token, outer_type, offset, completep, |
2660 | bases_to_consider, consider_construction); |
2661 | } |
2662 | |
/* Cache of queries for polymorphic call targets.

   Enumerating all call targets may get expensive when there are many
   polymorphic calls in the program, so we memoize all the previous
   queries and avoid duplicated work.  */

class polymorphic_call_target_d
{
public:
  /* Token (vtable slot index) of the virtual call being answered.  */
  HOST_WIDE_INT otr_token;
  /* Canonicalized polymorphic call context the query was made for.  */
  ipa_polymorphic_call_context context;
  /* ODR type of the call.  */
  odr_type type;
  /* Computed list of possible targets.  Owned by the cache entry and
     released by polymorphic_call_target_hasher::remove.  */
  vec <cgraph_node *> targets;
  /* If non-NULL, target decl whose -Wsuggest-final-methods counter should
     be bumped when this cached entry is hit again.  */
  tree decl_warning;
  /* If nonzero, 1 + index into final_warning_records->type_warnings used
     for -Wsuggest-final-types accounting on repeated queries.  */
  int type_warning;
  /* Number of ODR types known when the entry was computed; newly added
     types may change the answer, so stale entries must not compare equal.  */
  unsigned int n_odr_types;
  /* True if TARGETS is a complete list of possible targets.  */
  bool complete;
  /* True if this entry answers a speculative query.  */
  bool speculative;
};
2682 | |
/* Polymorphic call target cache helpers.  Traits class for the
   hash_table holding cached queries; entries are heap-allocated
   polymorphic_call_target_d records compared by their full key.  */

struct polymorphic_call_target_hasher
  : pointer_hash <polymorphic_call_target_d>
{
  /* Hash the full query key of an entry.  */
  static inline hashval_t hash (const polymorphic_call_target_d *);
  /* Compare two entries for key equality.  */
  static inline bool equal (const polymorphic_call_target_d *,
			    const polymorphic_call_target_d *);
  /* Free an entry together with its target vector.  */
  static inline void remove (polymorphic_call_target_d *);
};
2693 | |
/* Return the computed hashcode for ODR_QUERY.  Mixes in every field the
   equality predicate below compares, so equal keys hash equally.  */

inline hashval_t
polymorphic_call_target_hasher::hash (const polymorphic_call_target_d *odr_query)
{
  inchash::hash hstate (odr_query->otr_token);

  hstate.add_hwi (v: odr_query->type->id);
  hstate.merge_hash (TYPE_UID (odr_query->context.outer_type));
  hstate.add_hwi (v: odr_query->context.offset);
  /* Adding a new ODR type may change query results; keep the count in
     the key so stale entries never match.  */
  hstate.add_hwi (v: odr_query->n_odr_types);

  /* The speculative part of the context is hashed only when present.  */
  if (odr_query->context.speculative_outer_type)
    {
      hstate.merge_hash (TYPE_UID (odr_query->context.speculative_outer_type));
      hstate.add_hwi (v: odr_query->context.speculative_offset);
    }
  hstate.add_flag (flag: odr_query->speculative);
  hstate.add_flag (flag: odr_query->context.maybe_in_construction);
  hstate.add_flag (flag: odr_query->context.maybe_derived_type);
  hstate.add_flag (flag: odr_query->context.speculative_maybe_derived_type);
  hstate.commit_flag ();
  return hstate.end ();
}
2718 | |
2719 | /* Compare cache entries T1 and T2. */ |
2720 | |
2721 | inline bool |
2722 | polymorphic_call_target_hasher::equal (const polymorphic_call_target_d *t1, |
2723 | const polymorphic_call_target_d *t2) |
2724 | { |
2725 | return (t1->type == t2->type && t1->otr_token == t2->otr_token |
2726 | && t1->speculative == t2->speculative |
2727 | && t1->context.offset == t2->context.offset |
2728 | && t1->context.speculative_offset == t2->context.speculative_offset |
2729 | && t1->context.outer_type == t2->context.outer_type |
2730 | && t1->context.speculative_outer_type == t2->context.speculative_outer_type |
2731 | && t1->context.maybe_in_construction |
2732 | == t2->context.maybe_in_construction |
2733 | && t1->context.maybe_derived_type == t2->context.maybe_derived_type |
2734 | && (t1->context.speculative_maybe_derived_type |
2735 | == t2->context.speculative_maybe_derived_type) |
2736 | /* Adding new type may affect outcome of target search. */ |
2737 | && t1->n_odr_types == t2->n_odr_types); |
2738 | } |
2739 | |
/* Remove entry in polymorphic call target cache hash.  The entry owns
   its target vector, so release it before freeing the record itself.  */

inline void
polymorphic_call_target_hasher::remove (polymorphic_call_target_d *v)
{
  v->targets.release ();
  free (ptr: v);
}
2748 | |
/* Polymorphic call target query cache.  */

typedef hash_table<polymorphic_call_target_hasher>
   polymorphic_call_target_hash_type;
/* The single global cache instance; lazily allocated by
   possible_polymorphic_call_targets and destroyed by
   free_polymorphic_call_targets_hash.  */
static polymorphic_call_target_hash_type *polymorphic_call_target_hash;
2754 | |
2755 | /* Destroy polymorphic call target query cache. */ |
2756 | |
2757 | static void |
2758 | free_polymorphic_call_targets_hash () |
2759 | { |
2760 | if (cached_polymorphic_call_targets) |
2761 | { |
2762 | delete polymorphic_call_target_hash; |
2763 | polymorphic_call_target_hash = NULL; |
2764 | delete cached_polymorphic_call_targets; |
2765 | cached_polymorphic_call_targets = NULL; |
2766 | } |
2767 | } |
2768 | |
/* Force rebuilding type inheritance graph from scratch.
   This is used to make sure that we do not keep references to types
   which were not visible to free_lang_data.  */

void
rebuild_type_inheritance_graph ()
{
  /* Nothing to drop if the ODR type hash was never built.  */
  if (!odr_hash)
    return;
  delete odr_hash;
  odr_hash = NULL;
  odr_types_ptr = NULL;
  /* Cached target lists hold odr_type references; drop them as well.  */
  free_polymorphic_call_targets_hash ();
}
2783 | |
2784 | /* When virtual function is removed, we may need to flush the cache. */ |
2785 | |
2786 | static void |
2787 | devirt_node_removal_hook (struct cgraph_node *n, void *d ATTRIBUTE_UNUSED) |
2788 | { |
2789 | if (cached_polymorphic_call_targets |
2790 | && !thunk_expansion |
2791 | && cached_polymorphic_call_targets->contains (k: n)) |
2792 | free_polymorphic_call_targets_hash (); |
2793 | } |
2794 | |
2795 | /* Look up base of BINFO that has virtual table VTABLE with OFFSET. */ |
2796 | |
2797 | tree |
2798 | subbinfo_with_vtable_at_offset (tree binfo, unsigned HOST_WIDE_INT offset, |
2799 | tree vtable) |
2800 | { |
2801 | tree v = BINFO_VTABLE (binfo); |
2802 | int i; |
2803 | tree base_binfo; |
2804 | unsigned HOST_WIDE_INT this_offset; |
2805 | |
2806 | if (v) |
2807 | { |
2808 | if (!vtable_pointer_value_to_vtable (v, &v, &this_offset)) |
2809 | gcc_unreachable (); |
2810 | |
2811 | if (offset == this_offset |
2812 | && DECL_ASSEMBLER_NAME (v) == DECL_ASSEMBLER_NAME (vtable)) |
2813 | return binfo; |
2814 | } |
2815 | |
2816 | for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++) |
2817 | if (polymorphic_type_binfo_p (binfo: base_binfo)) |
2818 | { |
2819 | base_binfo = subbinfo_with_vtable_at_offset (binfo: base_binfo, offset, vtable); |
2820 | if (base_binfo) |
2821 | return base_binfo; |
2822 | } |
2823 | return NULL; |
2824 | } |
2825 | |
2826 | /* T is known constant value of virtual table pointer. |
2827 | Store virtual table to V and its offset to OFFSET. |
2828 | Return false if T does not look like virtual table reference. */ |
2829 | |
2830 | bool |
2831 | vtable_pointer_value_to_vtable (const_tree t, tree *v, |
2832 | unsigned HOST_WIDE_INT *offset) |
2833 | { |
2834 | /* We expect &MEM[(void *)&virtual_table + 16B]. |
2835 | We obtain object's BINFO from the context of the virtual table. |
2836 | This one contains pointer to virtual table represented via |
2837 | POINTER_PLUS_EXPR. Verify that this pointer matches what |
2838 | we propagated through. |
2839 | |
2840 | In the case of virtual inheritance, the virtual tables may |
2841 | be nested, i.e. the offset may be different from 16 and we may |
2842 | need to dive into the type representation. */ |
2843 | if (TREE_CODE (t) == ADDR_EXPR |
2844 | && TREE_CODE (TREE_OPERAND (t, 0)) == MEM_REF |
2845 | && TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == ADDR_EXPR |
2846 | && TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 1)) == INTEGER_CST |
2847 | && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 0)) |
2848 | == VAR_DECL) |
2849 | && DECL_VIRTUAL_P (TREE_OPERAND (TREE_OPERAND |
2850 | (TREE_OPERAND (t, 0), 0), 0))) |
2851 | { |
2852 | *v = TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 0); |
2853 | *offset = tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (t, 0), 1)); |
2854 | return true; |
2855 | } |
2856 | |
2857 | /* Alternative representation, used by C++ frontend is POINTER_PLUS_EXPR. |
2858 | We need to handle it when T comes from static variable initializer or |
2859 | BINFO. */ |
2860 | if (TREE_CODE (t) == POINTER_PLUS_EXPR) |
2861 | { |
2862 | *offset = tree_to_uhwi (TREE_OPERAND (t, 1)); |
2863 | t = TREE_OPERAND (t, 0); |
2864 | } |
2865 | else |
2866 | *offset = 0; |
2867 | |
2868 | if (TREE_CODE (t) != ADDR_EXPR) |
2869 | return false; |
2870 | *v = TREE_OPERAND (t, 0); |
2871 | return true; |
2872 | } |
2873 | |
2874 | /* T is known constant value of virtual table pointer. Return BINFO of the |
2875 | instance type. */ |
2876 | |
2877 | tree |
2878 | vtable_pointer_value_to_binfo (const_tree t) |
2879 | { |
2880 | tree vtable; |
2881 | unsigned HOST_WIDE_INT offset; |
2882 | |
2883 | if (!vtable_pointer_value_to_vtable (t, v: &vtable, offset: &offset)) |
2884 | return NULL_TREE; |
2885 | |
2886 | /* FIXME: for stores of construction vtables we return NULL, |
2887 | because we do not have BINFO for those. Eventually we should fix |
2888 | our representation to allow this case to be handled, too. |
2889 | In the case we see store of BINFO we however may assume |
2890 | that standard folding will be able to cope with it. */ |
2891 | return subbinfo_with_vtable_at_offset (TYPE_BINFO (DECL_CONTEXT (vtable)), |
2892 | offset, vtable); |
2893 | } |
2894 | |
2895 | /* Walk bases of OUTER_TYPE that contain OTR_TYPE at OFFSET. |
2896 | Look up their respective virtual methods for OTR_TOKEN and OTR_TYPE |
2897 | and insert them in NODES. |
2898 | |
2899 | MATCHED_VTABLES and INSERTED is used to avoid duplicated work. */ |
2900 | |
2901 | static void |
2902 | record_targets_from_bases (tree otr_type, |
2903 | HOST_WIDE_INT otr_token, |
2904 | tree outer_type, |
2905 | HOST_WIDE_INT offset, |
2906 | vec <cgraph_node *> &nodes, |
2907 | hash_set<tree> *inserted, |
2908 | hash_set<tree> *matched_vtables, |
2909 | bool *completep) |
2910 | { |
2911 | while (true) |
2912 | { |
2913 | HOST_WIDE_INT pos, size; |
2914 | tree base_binfo; |
2915 | tree fld; |
2916 | |
2917 | if (types_same_for_odr (type1: outer_type, type2: otr_type)) |
2918 | return; |
2919 | |
2920 | for (fld = TYPE_FIELDS (outer_type); fld; fld = DECL_CHAIN (fld)) |
2921 | { |
2922 | if (TREE_CODE (fld) != FIELD_DECL) |
2923 | continue; |
2924 | |
2925 | pos = int_bit_position (field: fld); |
2926 | size = tree_to_shwi (DECL_SIZE (fld)); |
2927 | if (pos <= offset && (pos + size) > offset |
2928 | /* Do not get confused by zero sized bases. */ |
2929 | && polymorphic_type_binfo_p (TYPE_BINFO (TREE_TYPE (fld)))) |
2930 | break; |
2931 | } |
2932 | /* Within a class type we should always find corresponding fields. */ |
2933 | gcc_assert (fld && TREE_CODE (TREE_TYPE (fld)) == RECORD_TYPE); |
2934 | |
2935 | /* Nonbase types should have been stripped by outer_class_type. */ |
2936 | gcc_assert (DECL_ARTIFICIAL (fld)); |
2937 | |
2938 | outer_type = TREE_TYPE (fld); |
2939 | offset -= pos; |
2940 | |
2941 | base_binfo = get_binfo_at_offset (TYPE_BINFO (outer_type), |
2942 | offset, otr_type); |
2943 | if (!base_binfo) |
2944 | { |
2945 | gcc_assert (odr_violation_reported); |
2946 | return; |
2947 | } |
2948 | gcc_assert (base_binfo); |
2949 | if (!matched_vtables->add (BINFO_VTABLE (base_binfo))) |
2950 | { |
2951 | bool can_refer; |
2952 | tree target = gimple_get_virt_method_for_binfo (otr_token, |
2953 | base_binfo, |
2954 | can_refer: &can_refer); |
2955 | if (!target || ! DECL_CXX_DESTRUCTOR_P (target)) |
2956 | maybe_record_node (nodes, target, inserted, can_refer, completep); |
2957 | matched_vtables->add (BINFO_VTABLE (base_binfo)); |
2958 | } |
2959 | } |
2960 | } |
2961 | |
2962 | /* When virtual table is removed, we may need to flush the cache. */ |
2963 | |
2964 | static void |
2965 | devirt_variable_node_removal_hook (varpool_node *n, |
2966 | void *d ATTRIBUTE_UNUSED) |
2967 | { |
2968 | if (cached_polymorphic_call_targets |
2969 | && DECL_VIRTUAL_P (n->decl) |
2970 | && type_in_anonymous_namespace_p (DECL_CONTEXT (n->decl))) |
2971 | free_polymorphic_call_targets_hash (); |
2972 | } |
2973 | |
/* Record about how many calls would benefit from given type to be final.
   Used by the -Wsuggest-final-types machinery.  */

struct odr_type_warn_count
{
  /* The ODR type that could be marked final.  */
  tree type;
  /* Number of polymorphic call sites that would benefit.  */
  int count;
  /* Profile-weighted variant of COUNT.  */
  profile_count dyn_count;
};
2982 | |
/* Record about how many calls would benefit from given method to be final.
   Used by the -Wsuggest-final-methods machinery.  */

struct decl_warn_count
{
  /* The method that could be marked final.  */
  tree decl;
  /* Number of polymorphic call sites that would benefit.  */
  int count;
  /* Profile-weighted variant of COUNT.  */
  profile_count dyn_count;
};
2991 | |
/* Information about type and decl warnings.  Aggregates per-type and
   per-method counters for -Wsuggest-final-types/-Wsuggest-final-methods.  */

class final_warning_record
{
public:
  /* If needed grow type_warnings vector and initialize new decl_warn_count
     to have dyn_count set to profile_count::zero ().  */
  void grow_type_warnings (unsigned newlen);

  /* Profile count of the call currently being analyzed; added to the
     per-type/per-decl dyn_count fields.  */
  profile_count dyn_count;
  /* Indexed by odr_type id; counts calls that a final type would help.  */
  auto_vec<odr_type_warn_count> type_warnings;
  /* Keyed by method decl; counts calls that a final method would help.  */
  hash_map<tree, decl_warn_count> decl_warnings;
};
3005 | |
3006 | void |
3007 | final_warning_record::grow_type_warnings (unsigned newlen) |
3008 | { |
3009 | unsigned len = type_warnings.length (); |
3010 | if (newlen > len) |
3011 | { |
3012 | type_warnings.safe_grow_cleared (len: newlen, exact: true); |
3013 | for (unsigned i = len; i < newlen; i++) |
3014 | type_warnings[i].dyn_count = profile_count::zero (); |
3015 | } |
3016 | } |
3017 | |
/* Global accounting for final-type/final-method suggestions; NULL unless
   the warning pass is collecting data.  */
class final_warning_record *final_warning_records;
3019 | |
/* Return vector containing possible targets of polymorphic call of type
   OTR_TYPE calling method OTR_TOKEN within type of OTR_OUTER_TYPE and OFFSET.
   If INCLUDE_BASES is true, walk also base types of OUTER_TYPES containing
   OTR_TYPE and include their virtual method.  This is useful for types
   possibly in construction or destruction where the virtual table may
   temporarily change to one of base types.  INCLUDE_DERIVED_TYPES make
   us to walk the inheritance graph for all derivations.

   If COMPLETEP is non-NULL, store true if the list is complete.
   CACHE_TOKEN (if non-NULL) will get stored to an unique ID of entry
   in the target cache.  If user needs to visit every target list
   just once, it can memoize them.

   If SPECULATIVE is set, the list will not contain targets that
   are not speculatively taken.

   Returned vector is placed into cache.  It is NOT caller's responsibility
   to free it.  The vector can be freed on cgraph_remove_node call if
   the particular node is a virtual function present in the cache.  */

vec <cgraph_node *>
possible_polymorphic_call_targets (tree otr_type,
				   HOST_WIDE_INT otr_token,
				   ipa_polymorphic_call_context context,
				   bool *completep,
				   void **cache_token,
				   bool speculative)
{
  static struct cgraph_node_hook_list *node_removal_hook_holder;
  vec <cgraph_node *> nodes = vNULL;
  auto_vec <tree, 8> bases_to_consider;
  odr_type type, outer_type;
  polymorphic_call_target_d key;
  polymorphic_call_target_d **slot;
  unsigned int i;
  tree binfo, target;
  bool complete;
  bool can_refer = false;
  bool skipped = false;

  otr_type = TYPE_MAIN_VARIANT (otr_type);

  /* If ODR is not initialized or the context is invalid, return empty
     incomplete list.  */
  if (!odr_hash || context.invalid || !TYPE_BINFO (otr_type))
    {
      if (completep)
	*completep = context.invalid;
      if (cache_token)
	*cache_token = NULL;
      return nodes;
    }

  /* Do not bother to compute speculative info when user do not asks for it.  */
  if (!speculative || !context.speculative_outer_type)
    context.clear_speculation ();

  type = get_odr_type (type: otr_type, insert: true);

  /* Recording type variants would waste results cache.  */
  gcc_assert (!context.outer_type
	      || TYPE_MAIN_VARIANT (context.outer_type) == context.outer_type);

  /* Look up the outer class type we want to walk.
     If we fail to do so, the context is invalid.  */
  if ((context.outer_type || context.speculative_outer_type)
      && !context.restrict_to_inner_class (otr_type))
    {
      if (completep)
	*completep = true;
      if (cache_token)
	*cache_token = NULL;
      return nodes;
    }
  gcc_assert (!context.invalid);

  /* Check that restrict_to_inner_class kept the main variant.  */
  gcc_assert (!context.outer_type
	      || TYPE_MAIN_VARIANT (context.outer_type) == context.outer_type);

  /* We canonicalize our query, so we do not need extra hashtable entries.  */

  /* Without outer type, we have no use for offset.  Just do the
     basic search from inner type.  */
  if (!context.outer_type)
    context.clear_outer_type (otr_type);
  /* We need to update our hierarchy if the type does not exist.  */
  outer_type = get_odr_type (type: context.outer_type, insert: true);
  /* If the type is complete, there are no derivations.  */
  if (TYPE_FINAL_P (outer_type->type))
    context.maybe_derived_type = false;

  /* Initialize query cache.  */
  if (!cached_polymorphic_call_targets)
    {
      cached_polymorphic_call_targets = new hash_set<cgraph_node *>;
      polymorphic_call_target_hash
       	= new polymorphic_call_target_hash_type (23);
      if (!node_removal_hook_holder)
	{
	  node_removal_hook_holder =
	    symtab->add_cgraph_removal_hook (hook: &devirt_node_removal_hook, NULL);
	  symtab->add_varpool_removal_hook (hook: &devirt_variable_node_removal_hook,
					    NULL);
	}
    }

  /* In LTO the context types must be the canonical ODR variants, so the
     cache key is stable across units.  */
  if (in_lto_p)
    {
      if (context.outer_type != otr_type)
        context.outer_type
	  = get_odr_type (type: context.outer_type, insert: true)->type;
      if (context.speculative_outer_type)
        context.speculative_outer_type
	  = get_odr_type (type: context.speculative_outer_type, insert: true)->type;
    }

  /* Look up cached answer.  */
  key.type = type;
  key.otr_token = otr_token;
  key.speculative = speculative;
  key.context = context;
  key.n_odr_types = odr_types.length ();
  slot = polymorphic_call_target_hash->find_slot (value: &key, insert: INSERT);
  if (cache_token)
    *cache_token = (void *)*slot;
  if (*slot)
    {
      /* Cache hit: bump the warning counters recorded with the entry and
	 return the memoized target list.  */
      if (completep)
	*completep = (*slot)->complete;
      if ((*slot)->type_warning && final_warning_records)
	{
	  final_warning_records->type_warnings[(*slot)->type_warning - 1].count++;
	  if (!final_warning_records->type_warnings
		[(*slot)->type_warning - 1].dyn_count.initialized_p ())
	    final_warning_records->type_warnings
	       [(*slot)->type_warning - 1].dyn_count = profile_count::zero ();
	  if (final_warning_records->dyn_count > 0)
	    final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
	      = final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
		+ final_warning_records->dyn_count;
	}
      if (!speculative && (*slot)->decl_warning && final_warning_records)
	{
	  struct decl_warn_count *c =
	     final_warning_records->decl_warnings.get (k: (*slot)->decl_warning);
	  c->count++;
	  if (final_warning_records->dyn_count > 0)
	    c->dyn_count += final_warning_records->dyn_count;
	}
      return (*slot)->targets;
    }

  complete = true;

  /* Do actual search.  */
  timevar_push (tv: TV_IPA_VIRTUAL_CALL);
  /* Allocate and pre-fill the cache entry before recursing so the slot is
     populated even when dumping re-enters the query.  */
  *slot = XCNEW (polymorphic_call_target_d);
  if (cache_token)
    *cache_token = (void *)*slot;
  (*slot)->type = type;
  (*slot)->otr_token = otr_token;
  (*slot)->context = context;
  (*slot)->speculative = speculative;

  hash_set<tree> inserted;
  hash_set<tree> matched_vtables;

  /* First insert targets we speculatively identified as likely.  */
  if (context.speculative_outer_type)
    {
      odr_type speculative_outer_type;
      bool speculation_complete = true;
      bool check_derived_types = false;

      /* First insert target from type itself and check if it may have
	 derived types.  */
      speculative_outer_type = get_odr_type (type: context.speculative_outer_type, insert: true);
      if (TYPE_FINAL_P (speculative_outer_type->type))
	context.speculative_maybe_derived_type = false;
      binfo = get_binfo_at_offset (TYPE_BINFO (speculative_outer_type->type),
				   context.speculative_offset, otr_type);
      if (binfo)
	target = gimple_get_virt_method_for_binfo (otr_token, binfo,
						   can_refer: &can_refer);
      else
	target = NULL;

      /* In the case we get complete method, we don't need
	 to walk derivations.  */
      if (target && DECL_FINAL_P (target))
	context.speculative_maybe_derived_type = false;
      if (check_derived_types
	  ? type_or_derived_type_possibly_instantiated_p
		 (t: speculative_outer_type)
	  : type_possibly_instantiated_p (t: speculative_outer_type->type))
	maybe_record_node (nodes, target, inserted: &inserted, can_refer,
			   completep: &speculation_complete);
      if (binfo)
	matched_vtables.add (BINFO_VTABLE (binfo));


      /* Next walk recursively all derived types.  */
      if (context.speculative_maybe_derived_type)
	for (i = 0; i < speculative_outer_type->derived_types.length(); i++)
	  possible_polymorphic_call_targets_1 (nodes, inserted: &inserted,
					       matched_vtables: &matched_vtables,
					       otr_type,
					       type: speculative_outer_type->derived_types[i],
					       otr_token, outer_type: speculative_outer_type->type,
					       offset: context.speculative_offset,
					       completep: &speculation_complete,
					       bases_to_consider,
					       consider_construction: false);
    }

  /* The non-speculative walk; skipped when a speculative answer was
     requested and the speculation already produced targets.  */
  if (!speculative || !nodes.length ())
    {
      bool check_derived_types = false;
      /* First see virtual method of type itself.  */
      binfo = get_binfo_at_offset (TYPE_BINFO (outer_type->type),
				   context.offset, otr_type);
      if (binfo)
	target = gimple_get_virt_method_for_binfo (otr_token, binfo,
						   can_refer: &can_refer);
      else
	{
	  gcc_assert (odr_violation_reported);
	  target = NULL;
	}

      /* Destructors are never called through construction virtual tables,
	 because the type is always known.  */
      if (target && DECL_CXX_DESTRUCTOR_P (target))
	context.maybe_in_construction = false;

      /* In the case we get complete method, we don't need
	 to walk derivations.  */
      if (target && DECL_FINAL_P (target))
	{
	  check_derived_types = true;
	  context.maybe_derived_type = false;
	}

      /* If OUTER_TYPE is abstract, we know we are not seeing its instance.  */
      if (check_derived_types
	  ? type_or_derived_type_possibly_instantiated_p (t: outer_type)
	  : type_possibly_instantiated_p (t: outer_type->type))
	maybe_record_node (nodes, target, inserted: &inserted, can_refer, completep: &complete);
      else
	skipped = true;

      if (binfo)
	matched_vtables.add (BINFO_VTABLE (binfo));

      /* Next walk recursively all derived types.  */
      if (context.maybe_derived_type)
	{
	  for (i = 0; i < outer_type->derived_types.length(); i++)
	    possible_polymorphic_call_targets_1 (nodes, inserted: &inserted,
						 matched_vtables: &matched_vtables,
						 otr_type,
						 type: outer_type->derived_types[i],
						 otr_token, outer_type: outer_type->type,
						 offset: context.offset, completep: &complete,
						 bases_to_consider,
						 consider_construction: context.maybe_in_construction);

	  if (!outer_type->all_derivations_known)
	    {
	      /* A single known target with possibly unknown derivations is
		 the case -Wsuggest-final-types/-methods cares about.  */
	      if (!speculative && final_warning_records
		  && nodes.length () == 1
		  && TREE_CODE (TREE_TYPE (nodes[0]->decl)) == METHOD_TYPE)
		{
		  if (complete
		      && warn_suggest_final_types
		      && !outer_type->derived_types.length ())
		    {
		      final_warning_records->grow_type_warnings
			(newlen: outer_type->id);
		      final_warning_records->type_warnings[outer_type->id].count++;
		      if (!final_warning_records->type_warnings
				[outer_type->id].dyn_count.initialized_p ())
			final_warning_records->type_warnings
			   [outer_type->id].dyn_count = profile_count::zero ();
		      final_warning_records->type_warnings[outer_type->id].dyn_count
			+= final_warning_records->dyn_count;
		      final_warning_records->type_warnings[outer_type->id].type
			= outer_type->type;
		      (*slot)->type_warning = outer_type->id + 1;
		    }
		  if (complete
		      && warn_suggest_final_methods
		      && types_same_for_odr (DECL_CONTEXT (nodes[0]->decl),
					     type2: outer_type->type))
		    {
		      bool existed;
		      struct decl_warn_count &c =
			 final_warning_records->decl_warnings.get_or_insert
			    (k: nodes[0]->decl, existed: &existed);

		      if (existed)
			{
			  c.count++;
			  c.dyn_count += final_warning_records->dyn_count;
			}
		      else
			{
			  c.count = 1;
			  c.dyn_count = final_warning_records->dyn_count;
			  c.decl = nodes[0]->decl;
			}
		      (*slot)->decl_warning = nodes[0]->decl;
		    }
		}
	      complete = false;
	    }
	}

      if (!speculative)
	{
	  /* Destructors are never called through construction virtual tables,
	     because the type is always known.  One of entries may be
	     cxa_pure_virtual so look to at least two of them.  */
	  if (context.maybe_in_construction)
	    for (i =0 ; i < MIN (nodes.length (), 2); i++)
	      if (DECL_CXX_DESTRUCTOR_P (nodes[i]->decl))
		context.maybe_in_construction = false;
	  if (context.maybe_in_construction)
	    {
	      if (type != outer_type
		  && (!skipped
		      || (context.maybe_derived_type
			  && !type_all_derivations_known_p (t: outer_type->type))))
		record_targets_from_bases (otr_type, otr_token, outer_type: outer_type->type,
					   offset: context.offset, nodes, inserted: &inserted,
					   matched_vtables: &matched_vtables, completep: &complete);
	      if (skipped)
		maybe_record_node (nodes, target, inserted: &inserted, can_refer, completep: &complete);
	      for (i = 0; i < bases_to_consider.length(); i++)
		maybe_record_node (nodes, target: bases_to_consider[i], inserted: &inserted, can_refer, completep: &complete);
	    }
	}
    }

  /* Store the computed answer into the pre-allocated cache entry.  */
  (*slot)->targets = nodes;
  (*slot)->complete = complete;
  (*slot)->n_odr_types = odr_types.length ();
  if (completep)
    *completep = complete;

  timevar_pop (tv: TV_IPA_VIRTUAL_CALL);
  return nodes;
}
3374 | |
/* Traversal callback collecting pointers to decl_warn_count entries into
   VEC; KEY is ignored.  Suited for walking a hash_map<tree, decl_warn_count>
   such as final_warning_records->decl_warnings — presumably invoked via
   hash_map::traverse by the warning emission code; verify at call site.
   Always returns true so the traversal visits every entry.  */

bool
add_decl_warning (const tree &key ATTRIBUTE_UNUSED, const decl_warn_count &value,
		  vec<const decl_warn_count*> *vec)
{
  vec->safe_push (obj: &value);
  return true;
}
3382 | |
3383 | /* Dump target list TARGETS into FILE. */ |
3384 | |
3385 | static void |
3386 | dump_targets (FILE *f, vec <cgraph_node *> targets, bool verbose) |
3387 | { |
3388 | unsigned int i; |
3389 | |
3390 | for (i = 0; i < targets.length (); i++) |
3391 | { |
3392 | char *name = NULL; |
3393 | if (in_lto_p) |
3394 | name = cplus_demangle_v3 (mangled: targets[i]->asm_name (), options: 0); |
3395 | fprintf (stream: f, format: " %s" , name ? name : targets[i]->dump_name ()); |
3396 | if (in_lto_p) |
3397 | free (ptr: name); |
3398 | if (!targets[i]->definition) |
3399 | fprintf (stream: f, format: " (no definition%s)" , |
3400 | DECL_DECLARED_INLINE_P (targets[i]->decl) |
3401 | ? " inline" : "" ); |
3402 | /* With many targets for every call polymorphic dumps are going to |
3403 | be quadratic in size. */ |
3404 | if (i > 10 && !verbose) |
3405 | { |
3406 | fprintf (stream: f, format: " ... and %i more targets\n" , targets.length () - i); |
3407 | return; |
3408 | } |
3409 | } |
3410 | fprintf (stream: f, format: "\n" ); |
3411 | } |
3412 | |
/* Dump all possible targets of a polymorphic call described by OTR_TYPE,
   OTR_TOKEN and context CTX into F; VERBOSE disables target-list
   truncation.  Queries the target cache (and thus may populate it).  */

void
dump_possible_polymorphic_call_targets (FILE *f,
					tree otr_type,
					HOST_WIDE_INT otr_token,
					const ipa_polymorphic_call_context &ctx,
					bool verbose)
{
  vec <cgraph_node *> targets;
  bool final;
  odr_type type = get_odr_type (TYPE_MAIN_VARIANT (otr_type), insert: false);
  unsigned int len;

  /* Nothing to dump when the type was never registered.  */
  if (!type)
    return;
  /* Non-speculative query first.  */
  targets = possible_polymorphic_call_targets (otr_type, otr_token,
					       context: ctx,
					       completep: &final, NULL, speculative: false);
  fprintf (stream: f, format: " Targets of polymorphic call of type %i:", type->id);
  print_generic_expr (f, type->type, TDF_SLIM);
  fprintf (stream: f, format: " token %i\n", (int)otr_token);

  ctx.dump (f);

  fprintf (stream: f, format: " %s%s%s%s\n ",
	   final ? "This is a complete list." :
	   "This is partial list; extra targets may be defined in other units.",
	   ctx.maybe_in_construction ? " (base types included)" : "",
	   ctx.maybe_derived_type ? " (derived types included)" : "",
	   ctx.speculative_maybe_derived_type ? " (speculative derived types included)" : "");
  len = targets.length ();
  dump_targets (f, targets, verbose);

  /* Speculative query; dumped only when it narrows the list.  */
  targets = possible_polymorphic_call_targets (otr_type, otr_token,
					       context: ctx,
					       completep: &final, NULL, speculative: true);
  if (targets.length () != len)
    {
      fprintf (stream: f, format: " Speculative targets:");
      dump_targets (f, targets, verbose);
    }
  /* Ugly: during callgraph construction the target cache may get populated
     before all targets are found.  While this is harmless (because all local
     types are discovered and only in those case we devirtualize fully and we
     don't do speculative devirtualization before IPA stage) it triggers
     assert here when dumping at that stage also populates the case with
     speculative targets.  Quietly ignore this.  */
  gcc_assert (symtab->state < IPA_SSA || targets.length () <= len);
  fprintf (stream: f, format: "\n");
}
3464 | |
3465 | |
3466 | /* Return true if N can be possibly target of a polymorphic call of |
3467 | OTR_TYPE/OTR_TOKEN. */ |
3468 | |
bool
possible_polymorphic_call_target_p (tree otr_type,
				    HOST_WIDE_INT otr_token,
				    const ipa_polymorphic_call_context &ctx,
				    struct cgraph_node *n)
{
  vec <cgraph_node *> targets;
  unsigned int i;
  bool final;

  /* The unreachable/trap builtins and the C++ pure-virtual handler are
     unconditionally accepted as possible targets.  */
  if (fndecl_built_in_p (node: n->decl, klass: BUILT_IN_NORMAL)
      && (DECL_FUNCTION_CODE (decl: n->decl) == BUILT_IN_UNREACHABLE
	  || DECL_FUNCTION_CODE (decl: n->decl) == BUILT_IN_TRAP
	  || DECL_FUNCTION_CODE (decl: n->decl) == BUILT_IN_UNREACHABLE_TRAP))
    return true;

  if (is_cxa_pure_virtual_p (target: n->decl))
    return true;

  /* Without the ODR type hash we cannot compute a target list; answer
     conservatively.  */
  if (!odr_hash)
    return true;
  targets = possible_polymorphic_call_targets (otr_type, otr_token, context: ctx, completep: &final);
  for (i = 0; i < targets.length (); i++)
    if (n->semantically_equivalent_p (target: targets[i]))
      return true;

  /* At a moment we allow middle end to dig out new external declarations
     as a targets of polymorphic calls.  */
  if (!final && !n->definition)
    return true;
  return false;
}
3501 | |
3502 | |
3503 | |
3504 | /* Return true if N can be possibly target of a polymorphic call of |
3505 | OBJ_TYPE_REF expression REF in STMT. */ |
3506 | |
bool
possible_polymorphic_call_target_p (tree ref,
				    gimple *stmt,
				    struct cgraph_node *n)
{
  /* Recover the polymorphic call context from the OBJ_TYPE_REF in STMT
     and dispatch to the type/token overload.  */
  ipa_polymorphic_call_context context (current_function_decl, ref, stmt);
  tree call_fn = gimple_call_fn (gs: stmt);

  return possible_polymorphic_call_target_p (otr_type: obj_type_ref_class (ref: call_fn),
					     otr_token: tree_to_uhwi
						(OBJ_TYPE_REF_TOKEN (call_fn)),
					     ctx: context,
					     n);
}
3521 | |
3522 | |
3523 | /* After callgraph construction new external nodes may appear. |
3524 | Add them into the graph. */ |
3525 | |
void
update_type_inheritance_graph (void)
{
  struct cgraph_node *n;

  if (!odr_hash)
    return;
  /* Cached target lists may be stale once new nodes appear; drop them
     before rebuilding the graph.  */
  free_polymorphic_call_targets_hash ();
  timevar_push (tv: TV_IPA_INHERITANCE);
  /* We reconstruct the graph starting from types of all methods seen in the
     unit.  Only undefined real symbols need processing here; defined ones
     were entered during the original construction.  */
  FOR_EACH_FUNCTION (n)
    if (DECL_VIRTUAL_P (n->decl)
	&& !n->definition
	&& n->real_symbol_p ())
      get_odr_type (TYPE_METHOD_BASETYPE (TREE_TYPE (n->decl)), insert: true);
  timevar_pop (tv: TV_IPA_INHERITANCE);
}
3544 | |
3545 | |
3546 | /* Return true if N looks like likely target of a polymorphic call. |
3547 | Rule out cxa_pure_virtual, noreturns, function declared cold and |
3548 | other obvious cases. */ |
3549 | |
bool
likely_target_p (struct cgraph_node *n)
{
  int flags;
  /* cxa_pure_virtual and similar things are not likely.  */
  if (TREE_CODE (TREE_TYPE (n->decl)) != METHOD_TYPE)
    return false;
  flags = flags_from_decl_or_type (n->decl);
  /* Functions that never return make poor speculation candidates.  */
  if (flags & ECF_NORETURN)
    return false;
  /* So do functions the user explicitly marked cold ...  */
  if (lookup_attribute (attr_name: "cold",
			DECL_ATTRIBUTES (n->decl)))
    return false;
  /* ... or ones heuristics/profile consider rarely executed.  */
  if (n->frequency < NODE_FREQUENCY_NORMAL)
    return false;
  /* If there are no live virtual tables referring the target,
     the only way the target can be called is an instance coming from other
     compilation unit; speculative devirtualization is built around an
     assumption that won't happen.  */
  if (!referenced_from_vtable_p (node: n))
    return false;
  return true;
}
3573 | |
3574 | /* Compare type warning records P1 and P2 and choose one with larger count; |
3575 | helper for qsort. */ |
3576 | |
3577 | static int |
3578 | type_warning_cmp (const void *p1, const void *p2) |
3579 | { |
3580 | const odr_type_warn_count *t1 = (const odr_type_warn_count *)p1; |
3581 | const odr_type_warn_count *t2 = (const odr_type_warn_count *)p2; |
3582 | |
3583 | if (t1->dyn_count < t2->dyn_count) |
3584 | return 1; |
3585 | if (t1->dyn_count > t2->dyn_count) |
3586 | return -1; |
3587 | return t2->count - t1->count; |
3588 | } |
3589 | |
3590 | /* Compare decl warning records P1 and P2 and choose one with larger count; |
3591 | helper for qsort. */ |
3592 | |
3593 | static int |
3594 | decl_warning_cmp (const void *p1, const void *p2) |
3595 | { |
3596 | const decl_warn_count *t1 = *(const decl_warn_count * const *)p1; |
3597 | const decl_warn_count *t2 = *(const decl_warn_count * const *)p2; |
3598 | |
3599 | if (t1->dyn_count < t2->dyn_count) |
3600 | return 1; |
3601 | if (t1->dyn_count > t2->dyn_count) |
3602 | return -1; |
3603 | return t2->count - t1->count; |
3604 | } |
3605 | |
3606 | |
3607 | /* Try to speculatively devirtualize call to OTR_TYPE with OTR_TOKEN with |
3608 | context CTX. */ |
3609 | |
struct cgraph_node *
try_speculative_devirtualization (tree otr_type, HOST_WIDE_INT otr_token,
				  ipa_polymorphic_call_context ctx)
{
  vec <cgraph_node *>targets
    = possible_polymorphic_call_targets
	 (otr_type, otr_token, context: ctx, NULL, NULL, speculative: true);
  unsigned int i;
  struct cgraph_node *likely_target = NULL;

  /* There must be exactly one likely target; with two or more we have no
     basis for choosing.  */
  for (i = 0; i < targets.length (); i++)
    if (likely_target_p (n: targets[i]))
      {
	if (likely_target)
	  return NULL;
	likely_target = targets[i];
      }
  /* Only speculate towards targets defined in this unit.  */
  if (!likely_target
      ||!likely_target->definition
      || DECL_EXTERNAL (likely_target->decl))
    return NULL;

  /* Don't use an implicitly-declared destructor (c++/58678).  */
  struct cgraph_node *non_thunk_target
    = likely_target->function_symbol ();
  if (DECL_ARTIFICIAL (non_thunk_target->decl))
    return NULL;
  /* Same overwritable-target test as in ipa_devirt below.  */
  if (likely_target->get_availability () <= AVAIL_INTERPOSABLE
      && likely_target->can_be_discarded_p ())
    return NULL;
  return likely_target;
}
3642 | |
3643 | /* The ipa-devirt pass. |
3644 | When polymorphic call has only one likely target in the unit, |
3645 | turn it into a speculative call. */ |
3646 | |
static unsigned int
ipa_devirt (void)
{
  struct cgraph_node *n;
  hash_set<void *> bad_call_targets;
  struct cgraph_edge *e;

  /* Statistics reported in the final dump.  */
  int npolymorphic = 0, nspeculated = 0, nconverted = 0, ncold = 0;
  int nmultiple = 0, noverwritable = 0, ndevirtualized = 0, nnotdefined = 0;
  int nwrong = 0, nok = 0, nexternal = 0, nartificial = 0;
  int ndropped = 0;

  if (!odr_types_ptr)
    return 0;

  if (dump_file)
    dump_type_inheritance_graph (f: dump_file);

  /* We can output -Wsuggest-final-methods and -Wsuggest-final-types warnings.
     This is implemented by setting up final_warning_records that are updated
     by get_polymorphic_call_targets.
     We need to clear cache in this case to trigger recomputation of all
     entries.  */
  if (warn_suggest_final_methods || warn_suggest_final_types)
    {
      final_warning_records = new (final_warning_record);
      final_warning_records->dyn_count = profile_count::zero ();
      final_warning_records->grow_type_warnings (odr_types.length ());
      free_polymorphic_call_targets_hash ();
    }

  FOR_EACH_DEFINED_FUNCTION (n)
    {
      bool update = false;
      if (!opt_for_fn (n->decl, flag_devirtualize))
	continue;
      if (dump_file && n->indirect_calls)
	fprintf (stream: dump_file, format: "\n\nProcesing function %s\n",
		 n->dump_name ());
      for (e = n->indirect_calls; e; e = e->next_callee)
	if (e->indirect_info->polymorphic)
	  {
	    struct cgraph_node *likely_target = NULL;
	    void *cache_token;
	    bool final;

	    if (final_warning_records)
	      final_warning_records->dyn_count = e->count.ipa ();

	    vec <cgraph_node *>targets
	       = possible_polymorphic_call_targets
		    (e, completep: &final, cache_token: &cache_token, speculative: true);
	    unsigned int i;

	    /* Trigger warnings by calculating non-speculative targets.  */
	    if (warn_suggest_final_methods || warn_suggest_final_types)
	      possible_polymorphic_call_targets (e);

	    if (dump_file)
	      dump_possible_polymorphic_call_targets
		(f: dump_file, e, verbose: (dump_flags & TDF_DETAILS));

	    npolymorphic++;

	    /* See if the call can be devirtualized by means of ipa-prop's
	       polymorphic call context propagation.  If not, we can just
	       forget about this call being polymorphic and avoid some heavy
	       lifting in remove_unreachable_nodes that will otherwise try to
	       keep all possible targets alive until inlining and in the inliner
	       itself.

	       This may need to be revisited once we add further ways to use
	       the may edges, but it is a reasonable thing to do right now.  */

	    if ((e->indirect_info->param_index == -1
		|| (!opt_for_fn (n->decl, flag_devirtualize_speculatively)
		    && e->indirect_info->vptr_changed))
		&& !flag_ltrans_devirtualize)
	      {
		e->indirect_info->polymorphic = false;
		ndropped++;
		if (dump_file)
		  fprintf (stream: dump_file, format: "Dropping polymorphic call info;"
			   " it cannot be used by ipa-prop\n");
	      }

	    if (!opt_for_fn (n->decl, flag_devirtualize_speculatively))
	      continue;

	    if (!e->maybe_hot_p ())
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Call is cold\n\n");
		ncold++;
		continue;
	      }
	    if (e->speculative)
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Call is already speculated\n\n");
		nspeculated++;

		/* When dumping see if we agree with speculation.  */
		if (!dump_file)
		  continue;
	      }
	    /* CACHE_TOKEN identifies the target list; if the same list was
	       already rejected, skip the scan.  */
	    if (bad_call_targets.contains (k: cache_token))
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Target list is known to be useless\n\n");
		nmultiple++;
		continue;
	      }
	    /* Look for a unique likely target; two or more disqualify the
	       whole list.  */
	    for (i = 0; i < targets.length (); i++)
	      if (likely_target_p (n: targets[i]))
		{
		  if (likely_target)
		    {
		      likely_target = NULL;
		      if (dump_file)
			fprintf (stream: dump_file, format: "More than one likely target\n\n");
		      nmultiple++;
		      break;
		    }
		  likely_target = targets[i];
		}
	    if (!likely_target)
	      {
		bad_call_targets.add (k: cache_token);
		continue;
	      }
	    /* This is reached only when dumping; check if we agree or disagree
	       with the speculation.  */
	    if (e->speculative)
	      {
		bool found = e->speculative_call_for_target (likely_target);
		if (found)
		  {
		    fprintf (stream: dump_file, format: "We agree with speculation\n\n");
		    nok++;
		  }
		else
		  {
		    fprintf (stream: dump_file, format: "We disagree with speculation\n\n");
		    nwrong++;
		  }
		continue;
	      }
	    if (!likely_target->definition)
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Target is not a definition\n\n");
		nnotdefined++;
		continue;
	      }
	    /* Do not introduce new references to external symbols.  While we
	       can handle these just well, it is common for programs to be
	       incorrectly built with headers declaring methods they are not
	       actually linked with.  */
	    if (DECL_EXTERNAL (likely_target->decl))
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Target is external\n\n");
		nexternal++;
		continue;
	      }
	    /* Don't use an implicitly-declared destructor (c++/58678).  */
	    struct cgraph_node *non_thunk_target
	      = likely_target->function_symbol ();
	    if (DECL_ARTIFICIAL (non_thunk_target->decl))
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Target is artificial\n\n");
		nartificial++;
		continue;
	      }
	    if (likely_target->get_availability () <= AVAIL_INTERPOSABLE
		&& likely_target->can_be_discarded_p ())
	      {
		if (dump_file)
		  fprintf (stream: dump_file, format: "Target is overwritable\n\n");
		noverwritable++;
		continue;
	      }
	    else if (dbg_cnt (index: devirt))
	      {
		if (dump_enabled_p ())
		  {
		    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
				     "speculatively devirtualizing call "
				     "in %s to %s\n",
				     n->dump_name (),
				     likely_target->dump_name ());
		  }
		/* Redirect through a non-interposable alias when one
		   exists so the speculation survives interposition.  */
		if (!likely_target->can_be_discarded_p ())
		  {
		    cgraph_node *alias;
		    alias = dyn_cast<cgraph_node *> (p: likely_target->noninterposable_alias ());
		    if (alias)
		      likely_target = alias;
		  }
		nconverted++;
		update = true;
		/* Scale the edge count to 8/10 for the speculative edge.  */
		e->make_speculative
		  (n2: likely_target, direct_count: e->count.apply_scale (num: 8, den: 10));
	      }
	  }
      if (update)
	ipa_update_overall_fn_summary (node: n);
    }
  /* Emit the collected -Wsuggest-final-types/-Wsuggest-final-methods
     warnings, most frequent first.  */
  if (warn_suggest_final_methods || warn_suggest_final_types)
    {
      if (warn_suggest_final_types)
	{
	  final_warning_records->type_warnings.qsort (type_warning_cmp);
	  for (unsigned int i = 0;
	       i < final_warning_records->type_warnings.length (); i++)
	    if (final_warning_records->type_warnings[i].count)
	      {
		tree type = final_warning_records->type_warnings[i].type;
		int count = final_warning_records->type_warnings[i].count;
		profile_count dyn_count
		  = final_warning_records->type_warnings[i].dyn_count;

		if (!(dyn_count > 0))
		  warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
			     OPT_Wsuggest_final_types, count,
			     "Declaring type %qD final "
			     "would enable devirtualization of %i call",
			     "Declaring type %qD final "
			     "would enable devirtualization of %i calls",
			     type,
			     count);
		else
		  warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
			     OPT_Wsuggest_final_types, count,
			     "Declaring type %qD final "
			     "would enable devirtualization of %i call "
			     "executed %lli times",
			     "Declaring type %qD final "
			     "would enable devirtualization of %i calls "
			     "executed %lli times",
			     type,
			     count,
			     (long long) dyn_count.to_gcov_type ());
	      }
	}

      if (warn_suggest_final_methods)
	{
	  auto_vec<const decl_warn_count*> decl_warnings_vec;

	  final_warning_records->decl_warnings.traverse
	    <vec<const decl_warn_count *> *, add_decl_warning> (a: &decl_warnings_vec);
	  decl_warnings_vec.qsort (decl_warning_cmp);
	  for (unsigned int i = 0; i < decl_warnings_vec.length (); i++)
	    {
	      tree decl = decl_warnings_vec[i]->decl;
	      int count = decl_warnings_vec[i]->count;
	      profile_count dyn_count
		= decl_warnings_vec[i]->dyn_count;

	      if (!(dyn_count > 0))
		if (DECL_CXX_DESTRUCTOR_P (decl))
		  warning_n (DECL_SOURCE_LOCATION (decl),
			     OPT_Wsuggest_final_methods, count,
			     "Declaring virtual destructor of %qD final "
			     "would enable devirtualization of %i call",
			     "Declaring virtual destructor of %qD final "
			     "would enable devirtualization of %i calls",
			     DECL_CONTEXT (decl), count);
		else
		  warning_n (DECL_SOURCE_LOCATION (decl),
			     OPT_Wsuggest_final_methods, count,
			     "Declaring method %qD final "
			     "would enable devirtualization of %i call",
			     "Declaring method %qD final "
			     "would enable devirtualization of %i calls",
			     decl, count);
	      else if (DECL_CXX_DESTRUCTOR_P (decl))
		warning_n (DECL_SOURCE_LOCATION (decl),
			   OPT_Wsuggest_final_methods, count,
			   "Declaring virtual destructor of %qD final "
			   "would enable devirtualization of %i call "
			   "executed %lli times",
			   "Declaring virtual destructor of %qD final "
			   "would enable devirtualization of %i calls "
			   "executed %lli times",
			   DECL_CONTEXT (decl), count,
			   (long long)dyn_count.to_gcov_type ());
	      else
		warning_n (DECL_SOURCE_LOCATION (decl),
			   OPT_Wsuggest_final_methods, count,
			   "Declaring method %qD final "
			   "would enable devirtualization of %i call "
			   "executed %lli times",
			   "Declaring method %qD final "
			   "would enable devirtualization of %i calls "
			   "executed %lli times",
			   decl, count,
			   (long long)dyn_count.to_gcov_type ());
	    }
	}

      delete (final_warning_records);
      final_warning_records = 0;
    }

  if (dump_file)
    fprintf (stream: dump_file,
	     format: "%i polymorphic calls, %i devirtualized,"
	     " %i speculatively devirtualized, %i cold\n"
	     "%i have multiple targets, %i overwritable,"
	     " %i already speculated (%i agree, %i disagree),"
	     " %i external, %i not defined, %i artificial, %i infos dropped\n",
	     npolymorphic, ndevirtualized, nconverted, ncold,
	     nmultiple, noverwritable, nspeculated, nok, nwrong,
	     nexternal, nnotdefined, nartificial, ndropped);
  return ndevirtualized || ndropped ? TODO_remove_functions : 0;
}
3967 | |
namespace {

/* Pass descriptor for the IPA devirtualization pass.  */
const pass_data pass_data_ipa_devirt =
{
  .type: IPA_PASS, /* type */
  .name: "devirt", /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_IPA_DEVIRT, /* tv_id */
  .properties_required: 0, /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: ( TODO_dump_symtab ), /* todo_flags_finish */
};

/* Pass wrapper around ipa_devirt; all summary streaming hooks are
   unused.  */
class pass_ipa_devirt : public ipa_opt_pass_d
{
public:
  pass_ipa_devirt (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_devirt, ctxt,
		      NULL, /* generate_summary */
		      NULL, /* write_summary */
		      NULL, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      NULL, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
    {
      /* In LTO, always run the IPA passes and decide on function basis if the
	 pass is enabled.  */
      if (in_lto_p)
	return true;
      return (flag_devirtualize
	      && (flag_devirtualize_speculatively
		  || (warn_suggest_final_methods
		      || warn_suggest_final_types))
	      && optimize);
    }

  unsigned int execute (function *) final override { return ipa_devirt (); }

}; // class pass_ipa_devirt

} // anon namespace
4018 | |
/* Create an instance of the ipa-devirt pass for the pass manager.  */

ipa_opt_pass_d *
make_pass_ipa_devirt (gcc::context *ctxt)
{
  return new pass_ipa_devirt (ctxt);
}
4024 | |
4025 | /* Print ODR name of a TYPE if available. |
4026 | Use demangler when option DEMANGLE is used. */ |
4027 | |
4028 | DEBUG_FUNCTION void |
4029 | debug_tree_odr_name (tree type, bool demangle) |
4030 | { |
4031 | const char *odr = get_odr_name_for_type (type); |
4032 | if (demangle) |
4033 | { |
4034 | const int opts = DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES; |
4035 | odr = cplus_demangle (mangled: odr, options: opts); |
4036 | } |
4037 | |
4038 | fprintf (stderr, format: "%s\n" , odr); |
4039 | } |
4040 | |
4041 | /* Register ODR enum so we later stream record about its values. */ |
4042 | |
void
register_odr_enum (tree t)
{
  /* The record is only consumed by ipa_odr_summary_write, i.e. only when
     streaming LTO.  */
  if (flag_lto)
    vec_safe_push (v&: odr_enums, obj: t);
}
4049 | |
4050 | /* Write ODR enums to LTO stream file. */ |
4051 | |
static void
ipa_odr_summary_write (void)
{
  /* Nothing to stream when no ODR enums were registered or read in.  */
  if (!odr_enums && !odr_enum_map)
    return;
  struct output_block *ob = create_output_block (LTO_section_odr_types);
  unsigned int i;
  tree t;

  if (odr_enums)
    {
      /* Section starts with the number of enums.  */
      streamer_write_uhwi (ob, odr_enums->length ());

      /* For every ODR enum stream out
	   - its ODR name
	   - number of values,
	   - value names and constant their represent
	   - bitpack of locations so we can do good diagnostics.  */
      FOR_EACH_VEC_ELT (*odr_enums, i, t)
	{
	  streamer_write_string (ob, ob->main_stream,
				 IDENTIFIER_POINTER
				     (DECL_ASSEMBLER_NAME (TYPE_NAME (t))),
				 true);

	  int n = 0;
	  for (tree e = TYPE_VALUES (t); e; e = TREE_CHAIN (e))
	    n++;
	  streamer_write_uhwi (ob, n);
	  for (tree e = TYPE_VALUES (t); e; e = TREE_CHAIN (e))
	    {
	      streamer_write_string (ob, ob->main_stream,
				     IDENTIFIER_POINTER (TREE_PURPOSE (e)),
				     true);
	      streamer_write_wide_int (ob,
				       wi::to_wide (DECL_INITIAL
						    (TREE_VALUE (e))));
	    }

	  /* Locations go last, bit-packed: first the enum's own, then one
	     per value, in the same order as the names above.  */
	  bitpack_d bp = bitpack_create (s: ob->main_stream);
	  lto_output_location (ob, &bp, DECL_SOURCE_LOCATION (TYPE_NAME (t)));
	  for (tree e = TYPE_VALUES (t); e; e = TREE_CHAIN (e))
	    lto_output_location (ob, &bp,
				 DECL_SOURCE_LOCATION (TREE_VALUE (e)));
	  streamer_write_bitpack (bp: &bp);
	}
      vec_free (v&: odr_enums);
      odr_enums = NULL;
    }
  /* During LTO incremental linking we already have streamed in types.  */
  else if (odr_enum_map)
    {
      gcc_checking_assert (!odr_enums);
      streamer_write_uhwi (ob, odr_enum_map->elements ());

      /* Re-stream the previously read records in the same layout as the
	 branch above.  */
      hash_map<nofree_string_hash, odr_enum>::iterator iter
	= odr_enum_map->begin ();
      for (; iter != odr_enum_map->end (); ++iter)
	{
	  odr_enum &this_enum = (*iter).second;
	  streamer_write_string (ob, ob->main_stream, (*iter).first, true);

	  streamer_write_uhwi (ob, this_enum.vals.length ());
	  for (unsigned j = 0; j < this_enum.vals.length (); j++)
	    {
	      streamer_write_string (ob, ob->main_stream,
				     this_enum.vals[j].name, true);
	      streamer_write_wide_int (ob, this_enum.vals[j].val);
	    }

	  bitpack_d bp = bitpack_create (s: ob->main_stream);
	  lto_output_location (ob, &bp, this_enum.locus);
	  for (unsigned j = 0; j < this_enum.vals.length (); j++)
	    lto_output_location (ob, &bp, this_enum.vals[j].locus);
	  streamer_write_bitpack (bp: &bp);
	}

      /* The map and its obstack storage are no longer needed once
	 written back out.  */
      delete odr_enum_map;
      obstack_free (&odr_enum_obstack, NULL);
      odr_enum_map = NULL;
    }

  produce_asm (ob, NULL);
  destroy_output_block (ob);
}
4137 | |
4138 | /* Write ODR enums from LTO stream file and warn on mismatches. */ |
4139 | |
4140 | static void |
4141 | ipa_odr_read_section (struct lto_file_decl_data *file_data, const char *data, |
4142 | size_t len) |
4143 | { |
4144 | const struct lto_function_header * |
4145 | = (const struct lto_function_header *) data; |
4146 | const int cfg_offset = sizeof (struct lto_function_header); |
4147 | const int main_offset = cfg_offset + header->cfg_size; |
4148 | const int string_offset = main_offset + header->main_size; |
4149 | class data_in *data_in; |
4150 | |
4151 | lto_input_block ib ((const char *) data + main_offset, header->main_size, |
4152 | file_data); |
4153 | |
4154 | data_in |
4155 | = lto_data_in_create (file_data, (const char *) data + string_offset, |
4156 | header->string_size, vNULL); |
4157 | unsigned int n = streamer_read_uhwi (&ib); |
4158 | |
4159 | if (!odr_enum_map) |
4160 | { |
4161 | gcc_obstack_init (&odr_enum_obstack); |
4162 | odr_enum_map = new (hash_map <nofree_string_hash, odr_enum>); |
4163 | } |
4164 | |
4165 | for (unsigned i = 0; i < n; i++) |
4166 | { |
4167 | const char *rname = streamer_read_string (data_in, &ib); |
4168 | unsigned int nvals = streamer_read_uhwi (&ib); |
4169 | char *name; |
4170 | |
4171 | obstack_grow (&odr_enum_obstack, rname, strlen (rname) + 1); |
4172 | name = XOBFINISH (&odr_enum_obstack, char *); |
4173 | |
4174 | bool existed_p; |
4175 | class odr_enum &this_enum |
4176 | = odr_enum_map->get_or_insert (k: xstrdup (name), existed: &existed_p); |
4177 | |
4178 | /* If this is first time we see the enum, remember its definition. */ |
4179 | if (!existed_p) |
4180 | { |
4181 | this_enum.vals.safe_grow_cleared (len: nvals, exact: true); |
4182 | this_enum.warned = false; |
4183 | if (dump_file) |
4184 | fprintf (stream: dump_file, format: "enum %s\n{\n" , name); |
4185 | for (unsigned j = 0; j < nvals; j++) |
4186 | { |
4187 | const char *val_name = streamer_read_string (data_in, &ib); |
4188 | obstack_grow (&odr_enum_obstack, val_name, strlen (val_name) + 1); |
4189 | this_enum.vals[j].name = XOBFINISH (&odr_enum_obstack, char *); |
4190 | this_enum.vals[j].val = streamer_read_wide_int (&ib); |
4191 | if (dump_file) |
4192 | fprintf (stream: dump_file, format: " %s = " HOST_WIDE_INT_PRINT_DEC ",\n" , |
4193 | val_name, wi::fits_shwi_p (x: this_enum.vals[j].val) |
4194 | ? this_enum.vals[j].val.to_shwi () : -1); |
4195 | } |
4196 | bitpack_d bp = streamer_read_bitpack (ib: &ib); |
4197 | stream_input_location (&this_enum.locus, &bp, data_in); |
4198 | for (unsigned j = 0; j < nvals; j++) |
4199 | stream_input_location (&this_enum.vals[j].locus, &bp, data_in); |
4200 | data_in->location_cache.apply_location_cache (); |
4201 | if (dump_file) |
4202 | fprintf (stream: dump_file, format: "}\n" ); |
4203 | } |
4204 | /* If we already have definition, compare it with new one and output |
4205 | warnings if they differs. */ |
4206 | else |
4207 | { |
4208 | int do_warning = -1; |
4209 | char *warn_name = NULL; |
4210 | wide_int warn_value = wi::zero (precision: 1); |
4211 | |
4212 | if (dump_file) |
4213 | fprintf (stream: dump_file, format: "Comparing enum %s\n" , name); |
4214 | |
4215 | /* Look for differences which we will warn about later once locations |
4216 | are streamed. */ |
4217 | for (unsigned j = 0; j < nvals; j++) |
4218 | { |
4219 | const char *id = streamer_read_string (data_in, &ib); |
4220 | wide_int val = streamer_read_wide_int (&ib); |
4221 | |
4222 | if (do_warning != -1 || j >= this_enum.vals.length ()) |
4223 | continue; |
4224 | if (strcmp (s1: id, s2: this_enum.vals[j].name) |
4225 | || (val.get_precision() != |
4226 | this_enum.vals[j].val.get_precision()) |
4227 | || val != this_enum.vals[j].val) |
4228 | { |
4229 | warn_name = xstrdup (id); |
4230 | warn_value = val; |
4231 | do_warning = j; |
4232 | if (dump_file) |
4233 | fprintf (stream: dump_file, format: " Different on entry %i\n" , j); |
4234 | } |
4235 | } |
4236 | |
4237 | /* Stream in locations, but do not apply them unless we are going |
4238 | to warn. */ |
4239 | bitpack_d bp = streamer_read_bitpack (ib: &ib); |
4240 | location_t locus; |
4241 | |
4242 | stream_input_location (&locus, &bp, data_in); |
4243 | |
4244 | /* Did we find a difference? */ |
4245 | if (do_warning != -1 || nvals != this_enum.vals.length ()) |
4246 | { |
4247 | data_in->location_cache.apply_location_cache (); |
4248 | |
4249 | const int opts = DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES; |
4250 | char *dmgname = cplus_demangle (mangled: name, options: opts); |
4251 | if (this_enum.warned |
4252 | || !warning_at (this_enum.locus, |
4253 | OPT_Wodr, "type %qs violates the " |
4254 | "C++ One Definition Rule" , |
4255 | dmgname)) |
4256 | do_warning = -1; |
4257 | else |
4258 | { |
4259 | this_enum.warned = true; |
4260 | if (do_warning == -1) |
4261 | inform (locus, |
4262 | "an enum with different number of values is defined" |
4263 | " in another translation unit" ); |
4264 | else if (warn_name) |
4265 | inform (locus, |
4266 | "an enum with different value name" |
4267 | " is defined in another translation unit" ); |
4268 | else |
4269 | inform (locus, |
4270 | "an enum with different values" |
4271 | " is defined in another translation unit" ); |
4272 | } |
4273 | } |
4274 | else |
4275 | data_in->location_cache.revert_location_cache (); |
4276 | |
4277 | /* Finally look up for location of the actual value that diverged. */ |
4278 | for (unsigned j = 0; j < nvals; j++) |
4279 | { |
4280 | location_t id_locus; |
4281 | |
4282 | data_in->location_cache.revert_location_cache (); |
4283 | stream_input_location (&id_locus, &bp, data_in); |
4284 | |
4285 | if ((int) j == do_warning) |
4286 | { |
4287 | data_in->location_cache.apply_location_cache (); |
4288 | |
4289 | if (strcmp (s1: warn_name, s2: this_enum.vals[j].name)) |
4290 | inform (this_enum.vals[j].locus, |
4291 | "name %qs differs from name %qs defined" |
4292 | " in another translation unit" , |
4293 | this_enum.vals[j].name, warn_name); |
4294 | else if (this_enum.vals[j].val.get_precision() != |
4295 | warn_value.get_precision()) |
4296 | inform (this_enum.vals[j].locus, |
4297 | "name %qs is defined as %u-bit while another " |
4298 | "translation unit defines it as %u-bit" , |
4299 | warn_name, this_enum.vals[j].val.get_precision(), |
4300 | warn_value.get_precision()); |
4301 | /* FIXME: In case there is easy way to print wide_ints, |
4302 | perhaps we could do it here instead of overflow check. */ |
4303 | else if (wi::fits_shwi_p (x: this_enum.vals[j].val) |
4304 | && wi::fits_shwi_p (x: warn_value)) |
4305 | inform (this_enum.vals[j].locus, |
4306 | "name %qs is defined to %wd while another " |
4307 | "translation unit defines it as %wd" , |
4308 | warn_name, this_enum.vals[j].val.to_shwi (), |
4309 | warn_value.to_shwi ()); |
4310 | else |
4311 | inform (this_enum.vals[j].locus, |
4312 | "name %qs is defined to different value " |
4313 | "in another translation unit" , |
4314 | warn_name); |
4315 | |
4316 | inform (id_locus, |
4317 | "mismatching definition" ); |
4318 | } |
4319 | else |
4320 | data_in->location_cache.revert_location_cache (); |
4321 | } |
4322 | if (warn_name) |
4323 | free (ptr: warn_name); |
4324 | obstack_free (&odr_enum_obstack, name); |
4325 | } |
4326 | } |
4327 | lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data, |
4328 | len); |
4329 | lto_data_in_delete (data_in); |
4330 | } |
4331 | |
4332 | /* Read all ODR type sections. */ |
4333 | |
4334 | static void |
4335 | ipa_odr_summary_read (void) |
4336 | { |
4337 | struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data (); |
4338 | struct lto_file_decl_data *file_data; |
4339 | unsigned int j = 0; |
4340 | |
4341 | while ((file_data = file_data_vec[j++])) |
4342 | { |
4343 | size_t len; |
4344 | const char *data |
4345 | = lto_get_summary_section_data (file_data, LTO_section_odr_types, |
4346 | &len); |
4347 | if (data) |
4348 | ipa_odr_read_section (file_data, data, len); |
4349 | } |
4350 | /* Enum info is used only to produce warnings. Only case we will need it |
4351 | again is streaming for incremental LTO. */ |
4352 | if (flag_incremental_link != INCREMENTAL_LINK_LTO) |
4353 | { |
4354 | delete odr_enum_map; |
4355 | obstack_free (&odr_enum_obstack, NULL); |
4356 | odr_enum_map = NULL; |
4357 | } |
4358 | } |
4359 | |
4360 | namespace { |
4361 | |
4362 | const pass_data pass_data_ipa_odr = |
4363 | { |
4364 | .type: IPA_PASS, /* type */ |
4365 | .name: "odr" , /* name */ |
4366 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
4367 | .tv_id: TV_IPA_ODR, /* tv_id */ |
4368 | .properties_required: 0, /* properties_required */ |
4369 | .properties_provided: 0, /* properties_provided */ |
4370 | .properties_destroyed: 0, /* properties_destroyed */ |
4371 | .todo_flags_start: 0, /* todo_flags_start */ |
4372 | .todo_flags_finish: 0, /* todo_flags_finish */ |
4373 | }; |
4374 | |
4375 | class pass_ipa_odr : public ipa_opt_pass_d |
4376 | { |
4377 | public: |
4378 | pass_ipa_odr (gcc::context *ctxt) |
4379 | : ipa_opt_pass_d (pass_data_ipa_odr, ctxt, |
4380 | NULL, /* generate_summary */ |
4381 | ipa_odr_summary_write, /* write_summary */ |
4382 | ipa_odr_summary_read, /* read_summary */ |
4383 | NULL, /* write_optimization_summary */ |
4384 | NULL, /* read_optimization_summary */ |
4385 | NULL, /* stmt_fixup */ |
4386 | 0, /* function_transform_todo_flags_start */ |
4387 | NULL, /* function_transform */ |
4388 | NULL) /* variable_transform */ |
4389 | {} |
4390 | |
4391 | /* opt_pass methods: */ |
4392 | bool gate (function *) final override |
4393 | { |
4394 | return (in_lto_p || flag_lto); |
4395 | } |
4396 | |
4397 | unsigned int execute (function *) final override |
4398 | { |
4399 | return 0; |
4400 | } |
4401 | |
4402 | }; // class pass_ipa_odr |
4403 | |
4404 | } // anon namespace |
4405 | |
4406 | ipa_opt_pass_d * |
4407 | make_pass_ipa_odr (gcc::context *ctxt) |
4408 | { |
4409 | return new pass_ipa_odr (ctxt); |
4410 | } |
4411 | |
4412 | |
4413 | #include "gt-ipa-devirt.h" |
4414 | |