1 | /* Support routines for vrange storage. |
2 | Copyright (C) 2022-2024 Free Software Foundation, Inc. |
3 | Contributed by Aldy Hernandez <aldyh@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) |
10 | any later version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | GNU General Public License for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "backend.h" |
25 | #include "tree.h" |
26 | #include "gimple.h" |
27 | #include "ssa.h" |
28 | #include "tree-pretty-print.h" |
29 | #include "fold-const.h" |
30 | #include "gimple-range.h" |
31 | #include "value-range-storage.h" |
32 | |
33 | // Generic memory allocator to share one interface between GC and |
34 | // obstack allocators. |
35 | |
36 | class vrange_internal_alloc |
37 | { |
38 | public: |
39 | vrange_internal_alloc () { } |
40 | virtual ~vrange_internal_alloc () { } |
41 | virtual void *alloc (size_t size) = 0; |
42 | virtual void free (void *) = 0; |
43 | private: |
44 | DISABLE_COPY_AND_ASSIGN (vrange_internal_alloc); |
45 | }; |
46 | |
47 | class vrange_obstack_alloc final: public vrange_internal_alloc |
48 | { |
49 | public: |
50 | vrange_obstack_alloc () |
51 | { |
52 | obstack_init (&m_obstack); |
53 | } |
54 | virtual ~vrange_obstack_alloc () final override |
55 | { |
56 | obstack_free (&m_obstack, NULL); |
57 | } |
58 | virtual void *alloc (size_t size) final override |
59 | { |
60 | return obstack_alloc (&m_obstack, size); |
61 | } |
62 | virtual void free (void *) final override { } |
63 | private: |
64 | obstack m_obstack; |
65 | }; |
66 | |
67 | class vrange_ggc_alloc final: public vrange_internal_alloc |
68 | { |
69 | public: |
70 | vrange_ggc_alloc () { } |
71 | virtual ~vrange_ggc_alloc () final override { } |
72 | virtual void *alloc (size_t size) final override |
73 | { |
    return ggc_internal_alloc (size);
75 | } |
76 | virtual void free (void *p) final override |
77 | { |
78 | return ggc_free (p); |
79 | } |
80 | }; |
81 | |
82 | vrange_allocator::vrange_allocator (bool gc) |
83 | { |
84 | if (gc) |
85 | m_alloc = new vrange_ggc_alloc; |
86 | else |
87 | m_alloc = new vrange_obstack_alloc; |
88 | } |
89 | |
90 | vrange_allocator::~vrange_allocator () |
91 | { |
92 | delete m_alloc; |
93 | } |
94 | |
95 | void * |
96 | vrange_allocator::alloc (size_t size) |
97 | { |
98 | return m_alloc->alloc (size); |
99 | } |
100 | |
101 | void |
102 | vrange_allocator::free (void *p) |
103 | { |
104 | m_alloc->free (p); |
105 | } |
106 | |
107 | // Allocate a new vrange_storage object initialized to R and return |
108 | // it. |
109 | |
110 | vrange_storage * |
111 | vrange_allocator::clone (const vrange &r) |
112 | { |
113 | return vrange_storage::alloc (*m_alloc, r); |
114 | } |
115 | |
116 | vrange_storage * |
117 | vrange_allocator::clone_varying (tree type) |
118 | { |
119 | if (irange::supports_p (type)) |
120 | return irange_storage::alloc (*m_alloc, int_range <1> (type)); |
121 | if (frange::supports_p (type)) |
    return frange_storage::alloc (*m_alloc, frange (type));
123 | return NULL; |
124 | } |
125 | |
126 | vrange_storage * |
127 | vrange_allocator::clone_undefined (tree type) |
128 | { |
129 | if (irange::supports_p (type)) |
130 | return irange_storage::alloc (*m_alloc, int_range<1> ()); |
131 | if (frange::supports_p (type)) |
    return frange_storage::alloc (*m_alloc, frange ());
133 | return NULL; |
134 | } |
135 | |
136 | // Allocate a new vrange_storage object initialized to R and return |
137 | // it. Return NULL if R is unsupported. |
138 | |
139 | vrange_storage * |
140 | vrange_storage::alloc (vrange_internal_alloc &allocator, const vrange &r) |
141 | { |
  if (is_a <irange> (r))
    return irange_storage::alloc (allocator, as_a <irange> (r));
  if (is_a <frange> (r))
    return frange_storage::alloc (allocator, as_a <frange> (r));
146 | return NULL; |
147 | } |
148 | |
149 | // Set storage to R. |
150 | |
151 | void |
152 | vrange_storage::set_vrange (const vrange &r) |
153 | { |
  if (is_a <irange> (r))
    {
      irange_storage *s = static_cast <irange_storage *> (this);
      gcc_checking_assert (s->fits_p (as_a <irange> (r)));
      s->set_irange (as_a <irange> (r));
    }
  else if (is_a <frange> (r))
    {
      frange_storage *s = static_cast <frange_storage *> (this);
      gcc_checking_assert (s->fits_p (as_a <frange> (r)));
      s->set_frange (as_a <frange> (r));
    }
165 | } |
166 | else |
167 | gcc_unreachable (); |
168 | } |
169 | |
170 | // Restore R from storage. |
171 | |
172 | void |
173 | vrange_storage::get_vrange (vrange &r, tree type) const |
174 | { |
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      s->get_irange (as_a <irange> (r), type);
    }
  else if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      s->get_frange (as_a <frange> (r), type);
    }
184 | } |
185 | else |
186 | gcc_unreachable (); |
187 | } |
188 | |
189 | // Return TRUE if storage can fit R. |
190 | |
191 | bool |
192 | vrange_storage::fits_p (const vrange &r) const |
193 | { |
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      return s->fits_p (as_a <irange> (r));
    }
  if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      return s->fits_p (as_a <frange> (r));
    }
203 | } |
204 | gcc_unreachable (); |
205 | return false; |
206 | } |
207 | |
208 | // Return TRUE if the range in storage is equal to R. It is the |
209 | // caller's responsibility to verify that the type of the range in |
210 | // storage matches that of R. |
211 | |
212 | bool |
213 | vrange_storage::equal_p (const vrange &r) const |
214 | { |
  if (is_a <irange> (r))
    {
      const irange_storage *s = static_cast <const irange_storage *> (this);
      return s->equal_p (as_a <irange> (r));
    }
  if (is_a <frange> (r))
    {
      const frange_storage *s = static_cast <const frange_storage *> (this);
      return s->equal_p (as_a <frange> (r));
    }
224 | } |
225 | gcc_unreachable (); |
226 | } |
227 | |
228 | //============================================================================ |
229 | // irange_storage implementation |
230 | //============================================================================ |
231 | |
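// The layout, inferred from the address arithmetic in this file, is a
// contiguous block of HOST_WIDE_INTs holding the 2 * m_num_ranges
// sub-range bounds plus the value/mask pair (hence the "+ 2" below),
// with each slot sized for WIDE_INT_MAX_HWIS (m_precision) words,
// followed by an unsigned short array recording the length of each
// wide_int:
//
//   m_val[(m_num_ranges * 2 + 2) * WIDE_INT_MAX_HWIS (m_precision)]
//   unsigned short lengths[m_num_ranges * 2 + 2]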
232 | unsigned short * |
233 | irange_storage::write_lengths_address () |
234 | { |
235 | return (unsigned short *)&m_val[(m_num_ranges * 2 + 2) |
236 | * WIDE_INT_MAX_HWIS (m_precision)]; |
237 | } |
238 | |
239 | const unsigned short * |
240 | irange_storage::lengths_address () const |
241 | { |
242 | return const_cast <irange_storage *> (this)->write_lengths_address (); |
243 | } |
244 | |
245 | // Allocate a new irange_storage object initialized to R. |
246 | |
247 | irange_storage * |
248 | irange_storage::alloc (vrange_internal_alloc &allocator, const irange &r) |
249 | { |
250 | size_t size = irange_storage::size (r); |
251 | irange_storage *p = static_cast <irange_storage *> (allocator.alloc (size)); |
252 | new (p) irange_storage (r); |
253 | return p; |
254 | } |
255 | |
256 | // Initialize the storage with R. |
257 | |
258 | irange_storage::irange_storage (const irange &r) |
259 | : m_max_ranges (r.num_pairs ()) |
260 | { |
261 | m_num_ranges = m_max_ranges; |
262 | set_irange (r); |
263 | } |
264 | |
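// Write the wide_int W into the buffer at VAL, record its length in
// LEN, and advance both pointers past what was written.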
265 | static inline void |
266 | write_wide_int (HOST_WIDE_INT *&val, unsigned short *&len, const wide_int &w) |
267 | { |
268 | *len = w.get_len (); |
269 | for (unsigned i = 0; i < *len; ++i) |
270 | *val++ = w.elt (i); |
271 | ++len; |
272 | } |
273 | |
274 | // Store R into the current storage. |
275 | |
276 | void |
277 | irange_storage::set_irange (const irange &r) |
278 | { |
279 | gcc_checking_assert (fits_p (r)); |
280 | |
281 | if (r.undefined_p ()) |
282 | { |
283 | m_kind = VR_UNDEFINED; |
284 | return; |
285 | } |
286 | if (r.varying_p ()) |
287 | { |
288 | m_kind = VR_VARYING; |
289 | return; |
290 | } |
291 | |
292 | m_precision = TYPE_PRECISION (r.type ()); |
293 | m_num_ranges = r.num_pairs (); |
294 | m_kind = VR_RANGE; |
295 | |
296 | HOST_WIDE_INT *val = &m_val[0]; |
297 | unsigned short *len = write_lengths_address (); |
298 | |
299 | for (unsigned i = 0; i < r.num_pairs (); ++i) |
300 | { |
      write_wide_int (val, len, r.lower_bound (i));
      write_wide_int (val, len, r.upper_bound (i));
303 | } |
304 | |
305 | // TODO: We could avoid streaming out the value if the mask is -1. |
306 | irange_bitmask bm = r.m_bitmask; |
  write_wide_int (val, len, bm.value ());
  write_wide_int (val, len, bm.mask ());
309 | |
310 | if (flag_checking) |
311 | { |
312 | int_range_max tmp; |
      get_irange (tmp, r.type ());
314 | gcc_checking_assert (tmp == r); |
315 | } |
316 | } |
317 | |
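// Read a wide_int of precision PREC and length LEN from VAL into W.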
318 | static inline void |
319 | read_wide_int (wide_int &w, |
320 | const HOST_WIDE_INT *val, unsigned short len, unsigned prec) |
321 | { |
322 | trailing_wide_int_storage stow (prec, &len, |
323 | const_cast <HOST_WIDE_INT *> (val)); |
324 | w = trailing_wide_int (stow); |
325 | } |
326 | |
327 | // Restore a range of TYPE from storage into R. |
328 | |
329 | void |
330 | irange_storage::get_irange (irange &r, tree type) const |
331 | { |
332 | if (m_kind == VR_UNDEFINED) |
333 | { |
334 | r.set_undefined (); |
335 | return; |
336 | } |
337 | if (m_kind == VR_VARYING) |
338 | { |
339 | r.set_varying (type); |
340 | return; |
341 | } |
342 | |
343 | gcc_checking_assert (TYPE_PRECISION (type) == m_precision); |
344 | const HOST_WIDE_INT *val = &m_val[0]; |
345 | const unsigned short *len = lengths_address (); |
346 | |
347 | // Handle the common case where R can fit the new range. |
348 | if (r.m_max_ranges >= m_num_ranges) |
349 | { |
350 | r.m_kind = VR_RANGE; |
351 | r.m_num_ranges = m_num_ranges; |
352 | r.m_type = type; |
353 | for (unsigned i = 0; i < m_num_ranges * 2; ++i) |
354 | { |
	  read_wide_int (r.m_base[i], val, *len, m_precision);
356 | val += *len++; |
357 | } |
358 | } |
359 | // Otherwise build the range piecewise. |
360 | else |
361 | { |
362 | r.set_undefined (); |
363 | for (unsigned i = 0; i < m_num_ranges; ++i) |
364 | { |
365 | wide_int lb, ub; |
	  read_wide_int (lb, val, *len, m_precision);
	  val += *len++;
	  read_wide_int (ub, val, *len, m_precision);
369 | val += *len++; |
370 | int_range<1> tmp (type, lb, ub); |
371 | r.union_ (tmp); |
372 | } |
373 | } |
374 | |
375 | wide_int bits_value, bits_mask; |
  read_wide_int (bits_value, val, *len, m_precision);
  val += *len++;
  read_wide_int (bits_mask, val, *len, m_precision);
379 | r.m_bitmask = irange_bitmask (bits_value, bits_mask); |
380 | if (r.m_kind == VR_VARYING) |
381 | r.m_kind = VR_RANGE; |
382 | |
383 | if (flag_checking) |
384 | r.verify_range (); |
385 | } |
386 | |
387 | bool |
388 | irange_storage::equal_p (const irange &r) const |
389 | { |
390 | if (m_kind == VR_UNDEFINED || r.undefined_p ()) |
391 | return m_kind == r.m_kind; |
392 | if (m_kind == VR_VARYING || r.varying_p ()) |
393 | return m_kind == r.m_kind; |
394 | |
395 | // ?? We could make this faster by doing the comparison in place, |
396 | // without going through get_irange. |
397 | int_range_max tmp; |
  get_irange (tmp, r.type ());
399 | return tmp == r; |
400 | } |
401 | |
402 | // Return the size in bytes to allocate storage that can hold R. |
403 | |
404 | size_t |
405 | irange_storage::size (const irange &r) |
406 | { |
407 | if (r.undefined_p ()) |
408 | return sizeof (irange_storage); |
409 | |
410 | unsigned prec = TYPE_PRECISION (r.type ()); |
411 | unsigned n = r.num_pairs () * 2 + 2; |
412 | unsigned hwi_size = ((n * WIDE_INT_MAX_HWIS (prec) - 1) |
413 | * sizeof (HOST_WIDE_INT)); |
414 | unsigned len_size = n * sizeof (unsigned short); |
415 | return sizeof (irange_storage) + hwi_size + len_size; |
416 | } |
417 | |
418 | // Return TRUE if R fits in the current storage. |
419 | |
420 | bool |
421 | irange_storage::fits_p (const irange &r) const |
422 | { |
423 | return m_max_ranges >= r.num_pairs (); |
424 | } |
425 | |
426 | void |
427 | irange_storage::dump () const |
428 | { |
  fprintf (stderr, "irange_storage (prec=%d, ranges=%d):\n",
430 | m_precision, m_num_ranges); |
431 | |
432 | if (m_num_ranges == 0) |
433 | return; |
434 | |
435 | const HOST_WIDE_INT *val = &m_val[0]; |
436 | const unsigned short *len = lengths_address (); |
437 | int i, j; |
438 | |
  fprintf (stderr, "  lengths = [ ");
  for (i = 0; i < m_num_ranges * 2 + 2; ++i)
    fprintf (stderr, "%d ", len[i]);
  fprintf (stderr, "]\n");
443 | |
444 | for (i = 0; i < m_num_ranges; ++i) |
445 | { |
      for (j = 0; j < *len; ++j)
	fprintf (stderr, "  [PAIR %d] LB " HOST_WIDE_INT_PRINT_DEC "\n", i,
		 *val++);
      ++len;
      for (j = 0; j < *len; ++j)
	fprintf (stderr, "  [PAIR %d] UB " HOST_WIDE_INT_PRINT_DEC "\n", i,
		 *val++);
      ++len;
454 | } |
455 | |
456 | // Dump value/mask pair. |
  for (j = 0; j < *len; ++j)
    fprintf (stderr, "  [VALUE] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
  ++len;
  for (j = 0; j < *len; ++j)
    fprintf (stderr, "  [MASK] " HOST_WIDE_INT_PRINT_DEC "\n", *val++);
462 | } |
463 | |
464 | DEBUG_FUNCTION void |
465 | debug (const irange_storage &storage) |
466 | { |
467 | storage.dump (); |
  fprintf (stderr, "\n");
469 | } |
470 | |
471 | //============================================================================ |
472 | // frange_storage implementation |
473 | //============================================================================ |
474 | |
475 | // Allocate a new frange_storage object initialized to R. |
476 | |
477 | frange_storage * |
478 | frange_storage::alloc (vrange_internal_alloc &allocator, const frange &r) |
479 | { |
480 | size_t size = sizeof (frange_storage); |
481 | frange_storage *p = static_cast <frange_storage *> (allocator.alloc (size)); |
482 | new (p) frange_storage (r); |
483 | return p; |
484 | } |
485 | |
486 | void |
487 | frange_storage::set_frange (const frange &r) |
488 | { |
489 | gcc_checking_assert (fits_p (r)); |
490 | |
491 | m_kind = r.m_kind; |
492 | m_min = r.m_min; |
493 | m_max = r.m_max; |
494 | m_pos_nan = r.m_pos_nan; |
495 | m_neg_nan = r.m_neg_nan; |
496 | } |
497 | |
498 | void |
499 | frange_storage::get_frange (frange &r, tree type) const |
500 | { |
501 | gcc_checking_assert (r.supports_type_p (type)); |
502 | |
503 | // Handle explicit NANs. |
504 | if (m_kind == VR_NAN) |
505 | { |
506 | if (HONOR_NANS (type)) |
507 | { |
508 | if (m_pos_nan && m_neg_nan) |
509 | r.set_nan (type); |
510 | else |
	    r.set_nan (type, m_neg_nan);
512 | } |
513 | else |
514 | r.set_undefined (); |
515 | return; |
516 | } |
517 | if (m_kind == VR_UNDEFINED) |
518 | { |
519 | r.set_undefined (); |
520 | return; |
521 | } |
522 | |
  // We use the constructor to create the new range, instead of writing
  // the bits into the frange directly, because the global range being
  // read may be inlined into a function with different floating-point
  // restrictions than those in effect when the range was originally
  // written.  We want to make sure the resulting range is canonicalized
  // correctly for the new consumer.
529 | r = frange (type, m_min, m_max, m_kind); |
530 | |
531 | // The constructor will set the NAN bits for HONOR_NANS, but we must |
532 | // make sure to set the NAN sign if known. |
533 | if (HONOR_NANS (type) && (m_pos_nan ^ m_neg_nan) == 1) |
    r.update_nan (m_neg_nan);
535 | else if (!m_pos_nan && !m_neg_nan) |
536 | r.clear_nan (); |
537 | } |
538 | |
539 | bool |
540 | frange_storage::equal_p (const frange &r) const |
541 | { |
542 | if (r.undefined_p ()) |
543 | return m_kind == VR_UNDEFINED; |
544 | |
545 | frange tmp; |
  get_frange (tmp, r.type ());
547 | return tmp == r; |
548 | } |
549 | |
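// Return TRUE if R fits in the current storage.  Any frange fits,
// because frange_storage holds a fixed-size copy of every frange
// field.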
550 | bool |
551 | frange_storage::fits_p (const frange &) const |
552 | { |
553 | return true; |
554 | } |
555 | |
556 | static vrange_allocator ggc_vrange_allocator (true); |
557 | |
558 | vrange_storage *ggc_alloc_vrange_storage (tree type) |
559 | { |
560 | return ggc_vrange_allocator.clone_varying (type); |
561 | } |
562 | |
563 | vrange_storage *ggc_alloc_vrange_storage (const vrange &r) |
564 | { |
565 | return ggc_vrange_allocator.clone (r); |
566 | } |
567 | |