1 | /* Support routines for Value Range Propagation (VRP). |
---|---|

2 | Copyright (C) 2005-2017 Free Software Foundation, Inc. |

3 | Contributed by Diego Novillo <dnovillo@redhat.com>. |

4 | |

5 | This file is part of GCC. |

6 | |

7 | GCC is free software; you can redistribute it and/or modify |

8 | it under the terms of the GNU General Public License as published by |

9 | the Free Software Foundation; either version 3, or (at your option) |

10 | any later version. |

11 | |

12 | GCC is distributed in the hope that it will be useful, |

13 | but WITHOUT ANY WARRANTY; without even the implied warranty of |

14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |

15 | GNU General Public License for more details. |

16 | |

17 | You should have received a copy of the GNU General Public License |

18 | along with GCC; see the file COPYING3. If not see |

19 | <http://www.gnu.org/licenses/>. */ |

20 | |

21 | #include "config.h" |

22 | #include "system.h" |

23 | #include "coretypes.h" |

24 | #include "backend.h" |

25 | #include "insn-codes.h" |

26 | #include "rtl.h" |

27 | #include "tree.h" |

28 | #include "gimple.h" |

29 | #include "cfghooks.h" |

30 | #include "tree-pass.h" |

31 | #include "ssa.h" |

32 | #include "optabs-tree.h" |

33 | #include "gimple-pretty-print.h" |

34 | #include "diagnostic-core.h" |

35 | #include "flags.h" |

36 | #include "fold-const.h" |

37 | #include "stor-layout.h" |

38 | #include "calls.h" |

39 | #include "cfganal.h" |

40 | #include "gimple-fold.h" |

41 | #include "tree-eh.h" |

42 | #include "gimple-iterator.h" |

43 | #include "gimple-walk.h" |

44 | #include "tree-cfg.h" |

45 | #include "tree-dfa.h" |

46 | #include "tree-ssa-loop-manip.h" |

47 | #include "tree-ssa-loop-niter.h" |

48 | #include "tree-ssa-loop.h" |

49 | #include "tree-into-ssa.h" |

50 | #include "tree-ssa.h" |

51 | #include "intl.h" |

52 | #include "cfgloop.h" |

53 | #include "tree-scalar-evolution.h" |

54 | #include "tree-ssa-propagate.h" |

55 | #include "tree-chrec.h" |

56 | #include "tree-ssa-threadupdate.h" |

57 | #include "tree-ssa-scopedtables.h" |

58 | #include "tree-ssa-threadedge.h" |

59 | #include "omp-general.h" |

60 | #include "target.h" |

61 | #include "case-cfn-macros.h" |

62 | #include "params.h" |

63 | #include "alloc-pool.h" |

64 | #include "domwalk.h" |

65 | #include "tree-cfgcleanup.h" |

66 | #include "stringpool.h" |

67 | #include "attribs.h" |

68 | #include "vr-values.h" |

69 | #include "builtins.h" |

70 | |

71 | /* Set of SSA names found live during the RPO traversal of the function |

72 | for still active basic-blocks. */ |

73 | static sbitmap *live; |

74 | |

75 | /* Return true if the SSA name NAME is live on the edge E. */ |

76 | |

77 | static bool |

78 | live_on_edge (edge e, tree name) |

79 | { |

80 | return (live[e->dest->index] |

81 | && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |

82 | } |

83 | |

84 | /* Location information for ASSERT_EXPRs. Each instance of this |

85 | structure describes an ASSERT_EXPR for an SSA name. Since a single |

86 | SSA name may have more than one assertion associated with it, these |

87 | locations are kept in a linked list attached to the corresponding |

88 | SSA name. */ |

89 | struct assert_locus |

90 | { |

91 | /* Basic block where the assertion would be inserted. */ |

92 | basic_block bb; |

93 | |

94 | /* Some assertions need to be inserted on an edge (e.g., assertions |

95 | generated by COND_EXPRs). In those cases, BB will be NULL. */ |

96 | edge e; |

97 | |

98 | /* Pointer to the statement that generated this assertion. */ |

99 | gimple_stmt_iterator si; |

100 | |

101 | /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ |

102 | enum tree_code comp_code; |

103 | |

104 | /* Value being compared against. */ |

105 | tree val; |

106 | |

107 | /* Expression to compare. */ |

108 | tree expr; |

109 | |

110 | /* Next node in the linked list. */ |

111 | assert_locus *next; |

112 | }; |

113 | |

114 | /* If bit I is present, it means that SSA name N_i has a list of |

115 | assertions that should be inserted in the IL. */ |

116 | static bitmap need_assert_for; |

117 | |

118 | /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] |

119 | holds a list of ASSERT_LOCUS_T nodes that describe where |

120 | ASSERT_EXPRs for SSA name N_I should be inserted. */ |

121 | static assert_locus **asserts_for; |

122 | |

123 | vec<edge> to_remove_edges; |

124 | vec<switch_update> to_update_switch_stmts; |

125 | |

126 | |

127 | /* Return the maximum value for TYPE. */ |

128 | |

129 | tree |

130 | vrp_val_max (const_tree type) |

131 | { |

132 | if (!INTEGRAL_TYPE_P (type)) |

133 | return NULL_TREE; |

134 | |

135 | return TYPE_MAX_VALUE (type); |

136 | } |

137 | |

138 | /* Return the minimum value for TYPE. */ |

139 | |

140 | tree |

141 | vrp_val_min (const_tree type) |

142 | { |

143 | if (!INTEGRAL_TYPE_P (type)) |

144 | return NULL_TREE; |

145 | |

146 | return TYPE_MIN_VALUE (type); |

147 | } |

148 | |

149 | /* Return whether VAL is equal to the maximum value of its type. |

150 | We can't do a simple equality comparison with TYPE_MAX_VALUE because |

151 | C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE |

152 | is not == to the integer constant with the same value in the type. */ |

153 | |

154 | bool |

155 | vrp_val_is_max (const_tree val) |

156 | { |

157 | tree type_max = vrp_val_max (TREE_TYPE (val)); |

158 | return (val == type_max |

159 | || (type_max != NULL_TREE |

160 | && operand_equal_p (val, type_max, 0))); |

161 | } |

162 | |

163 | /* Return whether VAL is equal to the minimum value of its type. */ |

164 | |

165 | bool |

166 | vrp_val_is_min (const_tree val) |

167 | { |

168 | tree type_min = vrp_val_min (TREE_TYPE (val)); |

169 | return (val == type_min |

170 | || (type_min != NULL_TREE |

171 | && operand_equal_p (val, type_min, 0))); |

172 | } |

173 | |

174 | |

175 | /* Set value range VR to VR_UNDEFINED. */ |

176 | |

177 | static inline void |

178 | set_value_range_to_undefined (value_range *vr) |

179 | { |

180 | vr->type = VR_UNDEFINED; |

181 | vr->min = vr->max = NULL_TREE; |

182 | if (vr->equiv) |

183 | bitmap_clear (vr->equiv); |

184 | } |

185 | |

186 | /* Set value range VR to VR_VARYING. */ |

187 | |

188 | void |

189 | set_value_range_to_varying (value_range *vr) |

190 | { |

191 | vr->type = VR_VARYING; |

192 | vr->min = vr->max = NULL_TREE; |

193 | if (vr->equiv) |

194 | bitmap_clear (vr->equiv); |

195 | } |

196 | |

197 | /* Set value range VR to {T, MIN, MAX, EQUIV}. */ |

198 | |

199 | void |

200 | set_value_range (value_range *vr, enum value_range_type t, tree min, |

201 | tree max, bitmap equiv) |

202 | { |

203 | /* Check the validity of the range. */ |

204 | if (flag_checking |

205 | && (t == VR_RANGE || t == VR_ANTI_RANGE)) |

206 | { |

207 | int cmp; |

208 | |

209 | gcc_assert (min && max); |

210 | |

211 | gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max)); |

212 | |

213 | if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) |

214 | gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); |

215 | |

216 | cmp = compare_values (min, max); |

217 | gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); |

218 | } |

219 | |

220 | if (flag_checking |

221 | && (t == VR_UNDEFINED || t == VR_VARYING)) |

222 | { |

223 | gcc_assert (min == NULL_TREE && max == NULL_TREE); |

224 | gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); |

225 | } |

226 | |

227 | vr->type = t; |

228 | vr->min = min; |

229 | vr->max = max; |

230 | |

231 | /* Since updating the equivalence set involves deep copying the |

232 | bitmaps, only do it if absolutely necessary. |

233 | |

234 | All equivalence bitmaps are allocated from the same obstack. So |

235 | we can use the obstack associated with EQUIV to allocate vr->equiv. */ |

236 | if (vr->equiv == NULL |

237 | && equiv != NULL) |

238 | vr->equiv = BITMAP_ALLOC (equiv->obstack); |

239 | |

240 | if (equiv != vr->equiv) |

241 | { |

242 | if (equiv && !bitmap_empty_p (equiv)) |

243 | bitmap_copy (vr->equiv, equiv); |

244 | else |

245 | bitmap_clear (vr->equiv); |

246 | } |

247 | } |

248 | |

249 | |

250 | /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. |

251 | This means adjusting T, MIN and MAX representing the case of a |

252 | wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] |

253 | as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. |

254 | In corner cases where MAX+1 or MIN-1 wraps this will fall back |

255 | to varying. |

256 | This routine exists to ease canonicalization in the case where we |

257 | extract ranges from var + CST op limit. */ |

258 | |

259 | void |

260 | set_and_canonicalize_value_range (value_range *vr, enum value_range_type t, |

261 | tree min, tree max, bitmap equiv) |

262 | { |

263 | /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ |

264 | if (t == VR_UNDEFINED) |

265 | { |

266 | set_value_range_to_undefined (vr); |

267 | return; |

268 | } |

269 | else if (t == VR_VARYING) |

270 | { |

271 | set_value_range_to_varying (vr); |

272 | return; |

273 | } |

274 | |

275 | /* Nothing to canonicalize for symbolic ranges. */ |

276 | if (TREE_CODE (min) != INTEGER_CST |

277 | || TREE_CODE (max) != INTEGER_CST) |

278 | { |

279 | set_value_range (vr, t, min, max, equiv); |

280 | return; |

281 | } |

282 | |

283 | /* Wrong order for min and max, to swap them and the VR type we need |

284 | to adjust them. */ |

285 | if (tree_int_cst_lt (max, min)) |

286 | { |

287 | tree one, tmp; |

288 | |

289 | /* For one bit precision if max < min, then the swapped |

290 | range covers all values, so for VR_RANGE it is varying and |

291 | for VR_ANTI_RANGE empty range, so drop to varying as well. */ |

292 | if (TYPE_PRECISION (TREE_TYPE (min)) == 1) |

293 | { |

294 | set_value_range_to_varying (vr); |

295 | return; |

296 | } |

297 | |

298 | one = build_int_cst (TREE_TYPE (min), 1); |

299 | tmp = int_const_binop (PLUS_EXPR, max, one); |

300 | max = int_const_binop (MINUS_EXPR, min, one); |

301 | min = tmp; |

302 | |

303 | /* There's one corner case, if we had [C+1, C] before we now have |

304 | that again. But this represents an empty value range, so drop |

305 | to varying in this case. */ |

306 | if (tree_int_cst_lt (max, min)) |

307 | { |

308 | set_value_range_to_varying (vr); |

309 | return; |

310 | } |

311 | |

312 | t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; |

313 | } |

314 | |

315 | /* Anti-ranges that can be represented as ranges should be so. */ |

316 | if (t == VR_ANTI_RANGE) |

317 | { |

318 | bool is_min = vrp_val_is_min (min); |

319 | bool is_max = vrp_val_is_max (max); |

320 | |

321 | if (is_min && is_max) |

322 | { |

323 | /* We cannot deal with empty ranges, drop to varying. |

324 | ??? This could be VR_UNDEFINED instead. */ |

325 | set_value_range_to_varying (vr); |

326 | return; |

327 | } |

328 | else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |

329 | && (is_min || is_max)) |

330 | { |

331 | /* Non-empty boolean ranges can always be represented |

332 | as a singleton range. */ |

333 | if (is_min) |

334 | min = max = vrp_val_max (TREE_TYPE (min)); |

335 | else |

336 | min = max = vrp_val_min (TREE_TYPE (min)); |

337 | t = VR_RANGE; |

338 | } |

339 | else if (is_min |

340 | /* As a special exception preserve non-null ranges. */ |

341 | && !(TYPE_UNSIGNED (TREE_TYPE (min)) |

342 | && integer_zerop (max))) |

343 | { |

344 | tree one = build_int_cst (TREE_TYPE (max), 1); |

345 | min = int_const_binop (PLUS_EXPR, max, one); |

346 | max = vrp_val_max (TREE_TYPE (max)); |

347 | t = VR_RANGE; |

348 | } |

349 | else if (is_max) |

350 | { |

351 | tree one = build_int_cst (TREE_TYPE (min), 1); |

352 | max = int_const_binop (MINUS_EXPR, min, one); |

353 | min = vrp_val_min (TREE_TYPE (min)); |

354 | t = VR_RANGE; |

355 | } |

356 | } |

357 | |

358 | /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky |

359 | to make sure VRP iteration terminates, otherwise we can get into |

360 | oscillations. */ |

361 | |

362 | set_value_range (vr, t, min, max, equiv); |

363 | } |

364 | |

365 | /* Copy value range FROM into value range TO. */ |

366 | |

367 | void |

368 | copy_value_range (value_range *to, value_range *from) |

369 | { |

370 | set_value_range (to, from->type, from->min, from->max, from->equiv); |

371 | } |

372 | |

373 | /* Set value range VR to a single value. This function is only called |

374 | with values we get from statements, and exists to clear the |

375 | TREE_OVERFLOW flag. */ |

376 | |

377 | void |

378 | set_value_range_to_value (value_range *vr, tree val, bitmap equiv) |

379 | { |

380 | gcc_assert (is_gimple_min_invariant (val)); |

381 | if (TREE_OVERFLOW_P (val)) |

382 | val = drop_tree_overflow (val); |

383 | set_value_range (vr, VR_RANGE, val, val, equiv); |

384 | } |

385 | |

386 | /* Set value range VR to a non-NULL range of type TYPE. */ |

387 | |

388 | void |

389 | set_value_range_to_nonnull (value_range *vr, tree type) |

390 | { |

391 | tree zero = build_int_cst (type, 0); |

392 | set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); |

393 | } |

394 | |

395 | |

396 | /* Set value range VR to a NULL range of type TYPE. */ |

397 | |

398 | void |

399 | set_value_range_to_null (value_range *vr, tree type) |

400 | { |

401 | set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |

402 | } |

403 | |

404 | |

405 | /* If abs (min) < abs (max), set VR to [-max, max], if |

406 | abs (min) >= abs (max), set VR to [-min, min]. */ |

407 | |

408 | static void |

409 | abs_extent_range (value_range *vr, tree min, tree max) |

410 | { |

411 | int cmp; |

412 | |

413 | gcc_assert (TREE_CODE (min) == INTEGER_CST); |

414 | gcc_assert (TREE_CODE (max) == INTEGER_CST); |

415 | gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); |

416 | gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); |

417 | min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); |

418 | max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); |

419 | if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) |

420 | { |

421 | set_value_range_to_varying (vr); |

422 | return; |

423 | } |

424 | cmp = compare_values (min, max); |

425 | if (cmp == -1) |

426 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); |

427 | else if (cmp == 0 || cmp == 1) |

428 | { |

429 | max = min; |

430 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); |

431 | } |

432 | else |

433 | { |

434 | set_value_range_to_varying (vr); |

435 | return; |

436 | } |

437 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |

438 | } |

439 | |

440 | /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |

441 | |

442 | bool |

443 | vrp_operand_equal_p (const_tree val1, const_tree val2) |

444 | { |

445 | if (val1 == val2) |

446 | return true; |

447 | if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) |

448 | return false; |

449 | return true; |

450 | } |

451 | |

452 | /* Return true, if the bitmaps B1 and B2 are equal. */ |

453 | |

454 | bool |

455 | vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |

456 | { |

457 | return (b1 == b2 |

458 | || ((!b1 || bitmap_empty_p (b1)) |

459 | && (!b2 || bitmap_empty_p (b2))) |

460 | || (b1 && b2 |

461 | && bitmap_equal_p (b1, b2))); |

462 | } |

463 | |

464 | /* Return true if VR is ~[0, 0]. */ |

465 | |

466 | bool |

467 | range_is_nonnull (value_range *vr) |

468 | { |

469 | return vr->type == VR_ANTI_RANGE |

470 | && integer_zerop (vr->min) |

471 | && integer_zerop (vr->max); |

472 | } |

473 | |

474 | |

475 | /* Return true if VR is [0, 0]. */ |

476 | |

477 | static inline bool |

478 | range_is_null (value_range *vr) |

479 | { |

480 | return vr->type == VR_RANGE |

481 | && integer_zerop (vr->min) |

482 | && integer_zerop (vr->max); |

483 | } |

484 | |

485 | /* Return true if max and min of VR are INTEGER_CST. It's not necessary |

486 | a singleton. */ |

487 | |

488 | bool |

489 | range_int_cst_p (value_range *vr) |

490 | { |

491 | return (vr->type == VR_RANGE |

492 | && TREE_CODE (vr->max) == INTEGER_CST |

493 | && TREE_CODE (vr->min) == INTEGER_CST); |

494 | } |

495 | |

496 | /* Return true if VR is a INTEGER_CST singleton. */ |

497 | |

498 | bool |

499 | range_int_cst_singleton_p (value_range *vr) |

500 | { |

501 | return (range_int_cst_p (vr) |

502 | && tree_int_cst_equal (vr->min, vr->max)); |

503 | } |

504 | |

505 | /* Return true if value range VR involves at least one symbol. */ |

506 | |

507 | bool |

508 | symbolic_range_p (value_range *vr) |

509 | { |

510 | return (!is_gimple_min_invariant (vr->min) |

511 | || !is_gimple_min_invariant (vr->max)); |

512 | } |

513 | |

514 | /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE |

515 | otherwise. We only handle additive operations and set NEG to true if the |

516 | symbol is negated and INV to the invariant part, if any. */ |

517 | |

518 | tree |

519 | get_single_symbol (tree t, bool *neg, tree *inv) |

520 | { |

521 | bool neg_; |

522 | tree inv_; |

523 | |

524 | *inv = NULL_TREE; |

525 | *neg = false; |

526 | |

527 | if (TREE_CODE (t) == PLUS_EXPR |

528 | || TREE_CODE (t) == POINTER_PLUS_EXPR |

529 | || TREE_CODE (t) == MINUS_EXPR) |

530 | { |

531 | if (is_gimple_min_invariant (TREE_OPERAND (t, 0))) |

532 | { |

533 | neg_ = (TREE_CODE (t) == MINUS_EXPR); |

534 | inv_ = TREE_OPERAND (t, 0); |

535 | t = TREE_OPERAND (t, 1); |

536 | } |

537 | else if (is_gimple_min_invariant (TREE_OPERAND (t, 1))) |

538 | { |

539 | neg_ = false; |

540 | inv_ = TREE_OPERAND (t, 1); |

541 | t = TREE_OPERAND (t, 0); |

542 | } |

543 | else |

544 | return NULL_TREE; |

545 | } |

546 | else |

547 | { |

548 | neg_ = false; |

549 | inv_ = NULL_TREE; |

550 | } |

551 | |

552 | if (TREE_CODE (t) == NEGATE_EXPR) |

553 | { |

554 | t = TREE_OPERAND (t, 0); |

555 | neg_ = !neg_; |

556 | } |

557 | |

558 | if (TREE_CODE (t) != SSA_NAME) |

559 | return NULL_TREE; |

560 | |

561 | if (inv_ && TREE_OVERFLOW_P (inv_)) |

562 | inv_ = drop_tree_overflow (inv_); |

563 | |

564 | *neg = neg_; |

565 | *inv = inv_; |

566 | return t; |

567 | } |

568 | |

569 | /* The reverse operation: build a symbolic expression with TYPE |

570 | from symbol SYM, negated according to NEG, and invariant INV. */ |

571 | |

572 | static tree |

573 | build_symbolic_expr (tree type, tree sym, bool neg, tree inv) |

574 | { |

575 | const bool pointer_p = POINTER_TYPE_P (type); |

576 | tree t = sym; |

577 | |

578 | if (neg) |

579 | t = build1 (NEGATE_EXPR, type, t); |

580 | |

581 | if (integer_zerop (inv)) |

582 | return t; |

583 | |

584 | return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); |

585 | } |

586 | |

587 | /* Return |

588 | 1 if VAL < VAL2 |

589 | 0 if !(VAL < VAL2) |

590 | -2 if those are incomparable. */ |

591 | int |

592 | operand_less_p (tree val, tree val2) |

593 | { |

594 | /* LT is folded faster than GE and others. Inline the common case. */ |

595 | if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) |

596 | return tree_int_cst_lt (val, val2); |

597 | else |

598 | { |

599 | tree tcmp; |

600 | |

601 | fold_defer_overflow_warnings (); |

602 | |

603 | tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); |

604 | |

605 | fold_undefer_and_ignore_overflow_warnings (); |

606 | |

607 | if (!tcmp |

608 | || TREE_CODE (tcmp) != INTEGER_CST) |

609 | return -2; |

610 | |

611 | if (!integer_zerop (tcmp)) |

612 | return 1; |

613 | } |

614 | |

615 | return 0; |

616 | } |

617 | |

618 | /* Compare two values VAL1 and VAL2. Return |

619 | |

620 | -2 if VAL1 and VAL2 cannot be compared at compile-time, |

621 | -1 if VAL1 < VAL2, |

622 | 0 if VAL1 == VAL2, |

623 | +1 if VAL1 > VAL2, and |

624 | +2 if VAL1 != VAL2 |

625 | |

626 | This is similar to tree_int_cst_compare but supports pointer values |

627 | and values that cannot be compared at compile time. |

628 | |

629 | If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to |

630 | true if the return value is only valid if we assume that signed |

631 | overflow is undefined. */ |

632 | |

633 | int |

634 | compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) |

635 | { |

636 | if (val1 == val2) |

637 | return 0; |

638 | |

639 | /* Below we rely on the fact that VAL1 and VAL2 are both pointers or |

640 | both integers. */ |

641 | gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) |

642 | == POINTER_TYPE_P (TREE_TYPE (val2))); |

643 | |

644 | /* Convert the two values into the same type. This is needed because |

645 | sizetype causes sign extension even for unsigned types. */ |

646 | val2 = fold_convert (TREE_TYPE (val1), val2); |

647 | STRIP_USELESS_TYPE_CONVERSION (val2); |

648 | |

649 | const bool overflow_undefined |

650 | = INTEGRAL_TYPE_P (TREE_TYPE (val1)) |

651 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)); |

652 | tree inv1, inv2; |

653 | bool neg1, neg2; |

654 | tree sym1 = get_single_symbol (val1, &neg1, &inv1); |

655 | tree sym2 = get_single_symbol (val2, &neg2, &inv2); |

656 | |

657 | /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1 |

658 | accordingly. If VAL1 and VAL2 don't use the same name, return -2. */ |

659 | if (sym1 && sym2) |

660 | { |

661 | /* Both values must use the same name with the same sign. */ |

662 | if (sym1 != sym2 || neg1 != neg2) |

663 | return -2; |

664 | |

665 | /* [-]NAME + CST == [-]NAME + CST. */ |

666 | if (inv1 == inv2) |

667 | return 0; |

668 | |

669 | /* If overflow is defined we cannot simplify more. */ |

670 | if (!overflow_undefined) |

671 | return -2; |

672 | |

673 | if (strict_overflow_p != NULL |

674 | /* Symbolic range building sets TREE_NO_WARNING to declare |

675 | that overflow doesn't happen. */ |

676 | && (!inv1 || !TREE_NO_WARNING (val1)) |

677 | && (!inv2 || !TREE_NO_WARNING (val2))) |

678 | *strict_overflow_p = true; |

679 | |

680 | if (!inv1) |

681 | inv1 = build_int_cst (TREE_TYPE (val1), 0); |

682 | if (!inv2) |

683 | inv2 = build_int_cst (TREE_TYPE (val2), 0); |

684 | |

685 | return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2), |

686 | TYPE_SIGN (TREE_TYPE (val1))); |

687 | } |

688 | |

689 | const bool cst1 = is_gimple_min_invariant (val1); |

690 | const bool cst2 = is_gimple_min_invariant (val2); |

691 | |

692 | /* If one is of the form '[-]NAME + CST' and the other is constant, then |

693 | it might be possible to say something depending on the constants. */ |

694 | if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1)) |

695 | { |

696 | if (!overflow_undefined) |

697 | return -2; |

698 | |

699 | if (strict_overflow_p != NULL |

700 | /* Symbolic range building sets TREE_NO_WARNING to declare |

701 | that overflow doesn't happen. */ |

702 | && (!sym1 || !TREE_NO_WARNING (val1)) |

703 | && (!sym2 || !TREE_NO_WARNING (val2))) |

704 | *strict_overflow_p = true; |

705 | |

706 | const signop sgn = TYPE_SIGN (TREE_TYPE (val1)); |

707 | tree cst = cst1 ? val1 : val2; |

708 | tree inv = cst1 ? inv2 : inv1; |

709 | |

710 | /* Compute the difference between the constants. If it overflows or |

711 | underflows, this means that we can trivially compare the NAME with |

712 | it and, consequently, the two values with each other. */ |

713 | wide_int diff = wi::to_wide (cst) - wi::to_wide (inv); |

714 | if (wi::cmp (0, wi::to_wide (inv), sgn) |

715 | != wi::cmp (diff, wi::to_wide (cst), sgn)) |

716 | { |

717 | const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn); |

718 | return cst1 ? res : -res; |

719 | } |

720 | |

721 | return -2; |

722 | } |

723 | |

724 | /* We cannot say anything more for non-constants. */ |

725 | if (!cst1 || !cst2) |

726 | return -2; |

727 | |

728 | if (!POINTER_TYPE_P (TREE_TYPE (val1))) |

729 | { |

730 | /* We cannot compare overflowed values. */ |

731 | if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) |

732 | return -2; |

733 | |

734 | return tree_int_cst_compare (val1, val2); |

735 | } |

736 | else |

737 | { |

738 | tree t; |

739 | |

740 | /* First see if VAL1 and VAL2 are not the same. */ |

741 | if (val1 == val2 || operand_equal_p (val1, val2, 0)) |

742 | return 0; |

743 | |

744 | /* If VAL1 is a lower address than VAL2, return -1. */ |

745 | if (operand_less_p (val1, val2) == 1) |

746 | return -1; |

747 | |

748 | /* If VAL1 is a higher address than VAL2, return +1. */ |

749 | if (operand_less_p (val2, val1) == 1) |

750 | return 1; |

751 | |

752 | /* If VAL1 is different than VAL2, return +2. |

753 | For integer constants we either have already returned -1 or 1 |

754 | or they are equivalent. We still might succeed in proving |

755 | something about non-trivial operands. */ |

756 | if (TREE_CODE (val1) != INTEGER_CST |

757 | || TREE_CODE (val2) != INTEGER_CST) |

758 | { |

759 | t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); |

760 | if (t && integer_onep (t)) |

761 | return 2; |

762 | } |

763 | |

764 | return -2; |

765 | } |

766 | } |

767 | |

768 | /* Compare values like compare_values_warnv. */ |

769 | |

770 | int |

771 | compare_values (tree val1, tree val2) |

772 | { |

773 | bool sop; |

774 | return compare_values_warnv (val1, val2, &sop); |

775 | } |

776 | |

777 | |

778 | /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |

779 | 0 if VAL is not inside [MIN, MAX], |

780 | -2 if we cannot tell either way. |

781 | |

782 | Benchmark compile/20001226-1.c compilation time after changing this |

783 | function. */ |

784 | |

785 | int |

786 | value_inside_range (tree val, tree min, tree max) |

787 | { |

788 | int cmp1, cmp2; |

789 | |

790 | cmp1 = operand_less_p (val, min); |

791 | if (cmp1 == -2) |

792 | return -2; |

793 | if (cmp1 == 1) |

794 | return 0; |

795 | |

796 | cmp2 = operand_less_p (max, val); |

797 | if (cmp2 == -2) |

798 | return -2; |

799 | |

800 | return !cmp2; |

801 | } |

802 | |

803 | |

804 | /* Return true if value ranges VR0 and VR1 have a non-empty |

805 | intersection. |

806 | |

807 | Benchmark compile/20001226-1.c compilation time after changing this |

808 | function. |

809 | */ |

810 | |

811 | static inline bool |

812 | value_ranges_intersect_p (value_range *vr0, value_range *vr1) |

813 | { |

814 | /* The value ranges do not intersect if the maximum of the first range is |

815 | less than the minimum of the second range or vice versa. |

816 | When those relations are unknown, we can't do any better. */ |

817 | if (operand_less_p (vr0->max, vr1->min) != 0) |

818 | return false; |

819 | if (operand_less_p (vr1->max, vr0->min) != 0) |

820 | return false; |

821 | return true; |

822 | } |

823 | |

824 | |

825 | /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not |

826 | include the value zero, -2 if we cannot tell. */ |

827 | |

828 | int |

829 | range_includes_zero_p (tree min, tree max) |

830 | { |

831 | tree zero = build_int_cst (TREE_TYPE (min), 0); |

832 | return value_inside_range (zero, min, max); |

833 | } |

834 | |

835 | /* Return true if *VR is know to only contain nonnegative values. */ |

836 | |

837 | static inline bool |

838 | value_range_nonnegative_p (value_range *vr) |

839 | { |

840 | /* Testing for VR_ANTI_RANGE is not useful here as any anti-range |

841 | which would return a useful value should be encoded as a |

842 | VR_RANGE. */ |

843 | if (vr->type == VR_RANGE) |

844 | { |

845 | int result = compare_values (vr->min, integer_zero_node); |

846 | return (result == 0 || result == 1); |

847 | } |

848 | |

849 | return false; |

850 | } |

851 | |

852 | /* If *VR has a value rante that is a single constant value return that, |

853 | otherwise return NULL_TREE. */ |

854 | |

855 | tree |

856 | value_range_constant_singleton (value_range *vr) |

857 | { |

858 | if (vr->type == VR_RANGE |

859 | && vrp_operand_equal_p (vr->min, vr->max) |

860 | && is_gimple_min_invariant (vr->min)) |

861 | return vr->min; |

862 | |

863 | return NULL_TREE; |

864 | } |

865 | |

866 | /* Wrapper around int_const_binop. Return true if we can compute the |

867 | result; i.e. if the operation doesn't overflow or if the overflow is |

868 | undefined. In the latter case (if the operation overflows and |

869 | overflow is undefined), then adjust the result to be -INF or +INF |

870 | depending on CODE, VAL1 and VAL2. Return the value in *RES. |

871 | |

872 | Return false for division by zero, for which the result is |

873 | indeterminate. */ |

874 | |

875 | static bool |

876 | vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res) |

877 | { |

878 | bool overflow = false; |

879 | signop sign = TYPE_SIGN (TREE_TYPE (val1)); |

880 | |

881 | switch (code) |

882 | { |

883 | case RSHIFT_EXPR: |

884 | case LSHIFT_EXPR: |

885 | { |

886 | wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1))); |

887 | if (wi::neg_p (wval2)) |

888 | { |

889 | wval2 = -wval2; |

890 | if (code == RSHIFT_EXPR) |

891 | code = LSHIFT_EXPR; |

892 | else |

893 | code = RSHIFT_EXPR; |

894 | } |

895 | |

896 | if (code == RSHIFT_EXPR) |

897 | /* It's unclear from the C standard whether shifts can overflow. |

898 | The following code ignores overflow; perhaps a C standard |

899 | interpretation ruling is needed. */ |

900 | *res = wi::rshift (wi::to_wide (val1), wval2, sign); |

901 | else |

902 | *res = wi::lshift (wi::to_wide (val1), wval2); |

903 | break; |

904 | } |

905 | |

906 | case MULT_EXPR: |

907 | *res = wi::mul (wi::to_wide (val1), |

908 | wi::to_wide (val2), sign, &overflow); |

909 | break; |

910 | |

911 | case TRUNC_DIV_EXPR: |

912 | case EXACT_DIV_EXPR: |

913 | if (val2 == 0) |

914 | return false; |

915 | else |

916 | *res = wi::div_trunc (wi::to_wide (val1), |

917 | wi::to_wide (val2), sign, &overflow); |

918 | break; |

919 | |

920 | case FLOOR_DIV_EXPR: |

921 | if (val2 == 0) |

922 | return false; |

923 | *res = wi::div_floor (wi::to_wide (val1), |

924 | wi::to_wide (val2), sign, &overflow); |

925 | break; |

926 | |

927 | case CEIL_DIV_EXPR: |

928 | if (val2 == 0) |

929 | return false; |

930 | *res = wi::div_ceil (wi::to_wide (val1), |

931 | wi::to_wide (val2), sign, &overflow); |

932 | break; |

933 | |

934 | case ROUND_DIV_EXPR: |

935 | if (val2 == 0) |

936 | return false; |

937 | *res = wi::div_round (wi::to_wide (val1), |

938 | wi::to_wide (val2), sign, &overflow); |

939 | break; |

940 | |

941 | default: |

942 | gcc_unreachable (); |

943 | } |

944 | |

945 | if (overflow |

946 | && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) |

947 | { |

948 | /* If the operation overflowed return -INF or +INF depending |

949 | on the operation and the combination of signs of the operands. */ |

950 | int sgn1 = tree_int_cst_sgn (val1); |

951 | int sgn2 = tree_int_cst_sgn (val2); |

952 | |

953 | /* Notice that we only need to handle the restricted set of |

954 | operations handled by extract_range_from_binary_expr. |

955 | Among them, only multiplication, addition and subtraction |

956 | can yield overflow without overflown operands because we |

957 | are working with integral types only... except in the |

958 | case VAL1 = -INF and VAL2 = -1 which overflows to +INF |

959 | for division too. */ |

960 | |

961 | /* For multiplication, the sign of the overflow is given |

962 | by the comparison of the signs of the operands. */ |

963 | if ((code == MULT_EXPR && sgn1 == sgn2) |

964 | /* For addition, the operands must be of the same sign |

965 | to yield an overflow. Its sign is therefore that |

966 | of one of the operands, for example the first. */ |

967 | || (code == PLUS_EXPR && sgn1 >= 0) |

968 | /* For subtraction, operands must be of |

969 | different signs to yield an overflow. Its sign is |

970 | therefore that of the first operand or the opposite of |

971 | that of the second operand. A first operand of 0 counts |

972 | as positive here, for the corner case 0 - (-INF), which |

973 | overflows, but must yield +INF. */ |

974 | || (code == MINUS_EXPR && sgn1 >= 0) |

975 | /* For division, the only case is -INF / -1 = +INF. */ |

976 | || code == TRUNC_DIV_EXPR |

977 | || code == FLOOR_DIV_EXPR |

978 | || code == CEIL_DIV_EXPR |

979 | || code == EXACT_DIV_EXPR |

980 | || code == ROUND_DIV_EXPR) |

981 | *res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)), |

982 | TYPE_SIGN (TREE_TYPE (val1))); |

983 | else |

984 | *res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)), |

985 | TYPE_SIGN (TREE_TYPE (val1))); |

986 | return true; |

987 | } |

988 | |

989 | return !overflow; |

990 | } |

991 | |

992 | |

993 | /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO |

994 | bitmask if some bit is unset, it means for all numbers in the range |

995 | the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO |

996 | bitmask if some bit is set, it means for all numbers in the range |

997 | the bit is 1, otherwise it might be 0 or 1. */ |

998 | |

999 | bool |

1000 | zero_nonzero_bits_from_vr (const tree expr_type, |

1001 | value_range *vr, |

1002 | wide_int *may_be_nonzero, |

1003 | wide_int *must_be_nonzero) |

1004 | { |

1005 | *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |

1006 | *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); |

1007 | if (!range_int_cst_p (vr)) |

1008 | return false; |

1009 | |

1010 | if (range_int_cst_singleton_p (vr)) |

1011 | { |

1012 | *may_be_nonzero = wi::to_wide (vr->min); |

1013 | *must_be_nonzero = *may_be_nonzero; |

1014 | } |

1015 | else if (tree_int_cst_sgn (vr->min) >= 0 |

1016 | || tree_int_cst_sgn (vr->max) < 0) |

1017 | { |

1018 | wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max); |

1019 | *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max); |

1020 | *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max); |

1021 | if (xor_mask != 0) |

1022 | { |

1023 | wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, |

1024 | may_be_nonzero->get_precision ()); |

1025 | *may_be_nonzero = *may_be_nonzero | mask; |

1026 | *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask); |

1027 | } |

1028 | } |

1029 | |

1030 | return true; |

1031 | } |

1032 | |

1033 | /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |

1034 | so that *VR0 U *VR1 == *AR. Returns true if that is possible, |

1035 | false otherwise. If *AR can be represented with a single range |

1036 | *VR1 will be VR_UNDEFINED. */ |

1037 | |

1038 | static bool |

1039 | ranges_from_anti_range (value_range *ar, |

1040 | value_range *vr0, value_range *vr1) |

1041 | { |

1042 | tree type = TREE_TYPE (ar->min); |

1043 | |

1044 | vr0->type = VR_UNDEFINED; |

1045 | vr1->type = VR_UNDEFINED; |

1046 | |

1047 | if (ar->type != VR_ANTI_RANGE |

1048 | || TREE_CODE (ar->min) != INTEGER_CST |

1049 | || TREE_CODE (ar->max) != INTEGER_CST |

1050 | || !vrp_val_min (type) |

1051 | || !vrp_val_max (type)) |

1052 | return false; |

1053 | |

1054 | if (!vrp_val_is_min (ar->min)) |

1055 | { |

1056 | vr0->type = VR_RANGE; |

1057 | vr0->min = vrp_val_min (type); |

1058 | vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1); |

1059 | } |

1060 | if (!vrp_val_is_max (ar->max)) |

1061 | { |

1062 | vr1->type = VR_RANGE; |

1063 | vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1); |

1064 | vr1->max = vrp_val_max (type); |

1065 | } |

1066 | if (vr0->type == VR_UNDEFINED) |

1067 | { |

1068 | *vr0 = *vr1; |

1069 | vr1->type = VR_UNDEFINED; |

1070 | } |

1071 | |

1072 | return vr0->type != VR_UNDEFINED; |

1073 | } |

1074 | |

1075 | /* Helper to extract a value-range *VR for a multiplicative operation |

1076 | *VR0 CODE *VR1. */ |

1077 | |

1078 | static void |

1079 | extract_range_from_multiplicative_op_1 (value_range *vr, |

1080 | enum tree_code code, |

1081 | value_range *vr0, value_range *vr1) |

1082 | { |

1083 | enum value_range_type rtype; |

1084 | wide_int val, min, max; |

1085 | tree type; |

1086 | |

1087 | /* Multiplications, divisions and shifts are a bit tricky to handle, |

1088 | depending on the mix of signs we have in the two ranges, we |

1089 | need to operate on different values to get the minimum and |

1090 | maximum values for the new range. One approach is to figure |

1091 | out all the variations of range combinations and do the |

1092 | operations. |

1093 | |

1094 | However, this involves several calls to compare_values and it |

1095 | is pretty convoluted. It's simpler to do the 4 operations |

1096 | (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP |

1097 | MAX1) and then figure the smallest and largest values to form |

1098 | the new range. */ |

1099 | gcc_assert (code == MULT_EXPR |

1100 | || code == TRUNC_DIV_EXPR |

1101 | || code == FLOOR_DIV_EXPR |

1102 | || code == CEIL_DIV_EXPR |

1103 | || code == EXACT_DIV_EXPR |

1104 | || code == ROUND_DIV_EXPR |

1105 | || code == RSHIFT_EXPR |

1106 | || code == LSHIFT_EXPR); |

1107 | gcc_assert (vr0->type == VR_RANGE |

1108 | && vr0->type == vr1->type); |

1109 | |

1110 | rtype = vr0->type; |

1111 | type = TREE_TYPE (vr0->min); |

1112 | signop sgn = TYPE_SIGN (type); |

1113 | |

1114 | /* Compute the 4 cross operations and their minimum and maximum value. */ |

1115 | if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val)) |

1116 | { |

1117 | set_value_range_to_varying (vr); |

1118 | return; |

1119 | } |

1120 | min = max = val; |

1121 | |

1122 | if (vr1->max != vr1->min) |

1123 | { |

1124 | if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val)) |

1125 | { |

1126 | set_value_range_to_varying (vr); |

1127 | return; |

1128 | } |

1129 | if (wi::lt_p (val, min, sgn)) |

1130 | min = val; |

1131 | else if (wi::gt_p (val, max, sgn)) |

1132 | max = val; |

1133 | } |

1134 | |

1135 | if (vr0->max != vr0->min) |

1136 | { |

1137 | if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val)) |

1138 | { |

1139 | set_value_range_to_varying (vr); |

1140 | return; |

1141 | } |

1142 | if (wi::lt_p (val, min, sgn)) |

1143 | min = val; |

1144 | else if (wi::gt_p (val, max, sgn)) |

1145 | max = val; |

1146 | } |

1147 | |

1148 | if (vr0->min != vr0->max && vr1->min != vr1->max) |

1149 | { |

1150 | if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val)) |

1151 | { |

1152 | set_value_range_to_varying (vr); |

1153 | return; |

1154 | } |

1155 | if (wi::lt_p (val, min, sgn)) |

1156 | min = val; |

1157 | else if (wi::gt_p (val, max, sgn)) |

1158 | max = val; |

1159 | } |

1160 | |

1161 | /* If the new range has its limits swapped around (MIN > MAX), |

1162 | then the operation caused one of them to wrap around, mark |

1163 | the new range VARYING. */ |

1164 | if (wi::gt_p (min, max, sgn)) |

1165 | { |

1166 | set_value_range_to_varying (vr); |

1167 | return; |

1168 | } |

1169 | |

1170 | /* We punt for [-INF, +INF]. |

1171 | We learn nothing when we have INF on both sides. |

1172 | Note that we do accept [-INF, -INF] and [+INF, +INF]. */ |

1173 | if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn)) |

1174 | && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn))) |

1175 | { |

1176 | set_value_range_to_varying (vr); |

1177 | return; |

1178 | } |

1179 | |

1180 | set_value_range (vr, rtype, |

1181 | wide_int_to_tree (type, min), |

1182 | wide_int_to_tree (type, max), NULL); |

1183 | } |

1184 | |

1185 | /* Extract range information from a binary operation CODE based on |

1186 | the ranges of each of its operands *VR0 and *VR1 with resulting |

1187 | type EXPR_TYPE. The resulting range is stored in *VR. */ |

1188 | |

1189 | void |

1190 | extract_range_from_binary_expr_1 (value_range *vr, |

1191 | enum tree_code code, tree expr_type, |

1192 | value_range *vr0_, value_range *vr1_) |

1193 | { |

1194 | value_range vr0 = *vr0_, vr1 = *vr1_; |

1195 | value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |

1196 | enum value_range_type type; |

1197 | tree min = NULL_TREE, max = NULL_TREE; |

1198 | int cmp; |

1199 | |

1200 | if (!INTEGRAL_TYPE_P (expr_type) |

1201 | && !POINTER_TYPE_P (expr_type)) |

1202 | { |

1203 | set_value_range_to_varying (vr); |

1204 | return; |

1205 | } |

1206 | |

1207 | /* Not all binary expressions can be applied to ranges in a |

1208 | meaningful way. Handle only arithmetic operations. */ |

1209 | if (code != PLUS_EXPR |

1210 | && code != MINUS_EXPR |

1211 | && code != POINTER_PLUS_EXPR |

1212 | && code != MULT_EXPR |

1213 | && code != TRUNC_DIV_EXPR |

1214 | && code != FLOOR_DIV_EXPR |

1215 | && code != CEIL_DIV_EXPR |

1216 | && code != EXACT_DIV_EXPR |

1217 | && code != ROUND_DIV_EXPR |

1218 | && code != TRUNC_MOD_EXPR |

1219 | && code != RSHIFT_EXPR |

1220 | && code != LSHIFT_EXPR |

1221 | && code != MIN_EXPR |

1222 | && code != MAX_EXPR |

1223 | && code != BIT_AND_EXPR |

1224 | && code != BIT_IOR_EXPR |

1225 | && code != BIT_XOR_EXPR) |

1226 | { |

1227 | set_value_range_to_varying (vr); |

1228 | return; |

1229 | } |

1230 | |

1231 | /* If both ranges are UNDEFINED, so is the result. */ |

1232 | if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) |

1233 | { |

1234 | set_value_range_to_undefined (vr); |

1235 | return; |

1236 | } |

1237 | /* If one of the ranges is UNDEFINED drop it to VARYING for the following |

1238 | code. At some point we may want to special-case operations that |

1239 | have UNDEFINED result for all or some value-ranges of the not UNDEFINED |

1240 | operand. */ |

1241 | else if (vr0.type == VR_UNDEFINED) |

1242 | set_value_range_to_varying (&vr0); |

1243 | else if (vr1.type == VR_UNDEFINED) |

1244 | set_value_range_to_varying (&vr1); |

1245 | |

1246 | /* We get imprecise results from ranges_from_anti_range when |

1247 | code is EXACT_DIV_EXPR. We could mask out bits in the resulting |

1248 | range, but then we also need to hack up vrp_meet. It's just |

1249 | easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */ |

1250 | if (code == EXACT_DIV_EXPR |

1251 | && vr0.type == VR_ANTI_RANGE |

1252 | && vr0.min == vr0.max |

1253 | && integer_zerop (vr0.min)) |

1254 | { |

1255 | set_value_range_to_nonnull (vr, expr_type); |

1256 | return; |

1257 | } |

1258 | |

1259 | /* Now canonicalize anti-ranges to ranges when they are not symbolic |

1260 | and express ~[] op X as ([]' op X) U ([]'' op X). */ |

1261 | if (vr0.type == VR_ANTI_RANGE |

1262 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |

1263 | { |

1264 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); |

1265 | if (vrtem1.type != VR_UNDEFINED) |

1266 | { |

1267 | value_range vrres = VR_INITIALIZER; |

1268 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, |

1269 | &vrtem1, vr1_); |

1270 | vrp_meet (vr, &vrres); |

1271 | } |

1272 | return; |

1273 | } |

1274 | /* Likewise for X op ~[]. */ |

1275 | if (vr1.type == VR_ANTI_RANGE |

1276 | && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) |

1277 | { |

1278 | extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); |

1279 | if (vrtem1.type != VR_UNDEFINED) |

1280 | { |

1281 | value_range vrres = VR_INITIALIZER; |

1282 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, |

1283 | vr0_, &vrtem1); |

1284 | vrp_meet (vr, &vrres); |

1285 | } |

1286 | return; |

1287 | } |

1288 | |

1289 | /* The type of the resulting value range defaults to VR0.TYPE. */ |

1290 | type = vr0.type; |

1291 | |

1292 | /* Refuse to operate on VARYING ranges, ranges of different kinds |

1293 | and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |

1294 | because we may be able to derive a useful range even if one of |

1295 | the operands is VR_VARYING or symbolic range. Similarly for |

1296 | divisions, MIN/MAX and PLUS/MINUS. |

1297 | |

1298 | TODO, we may be able to derive anti-ranges in some cases. */ |

1299 | if (code != BIT_AND_EXPR |

1300 | && code != BIT_IOR_EXPR |

1301 | && code != TRUNC_DIV_EXPR |

1302 | && code != FLOOR_DIV_EXPR |

1303 | && code != CEIL_DIV_EXPR |

1304 | && code != EXACT_DIV_EXPR |

1305 | && code != ROUND_DIV_EXPR |

1306 | && code != TRUNC_MOD_EXPR |

1307 | && code != MIN_EXPR |

1308 | && code != MAX_EXPR |

1309 | && code != PLUS_EXPR |

1310 | && code != MINUS_EXPR |

1311 | && code != RSHIFT_EXPR |

1312 | && (vr0.type == VR_VARYING |

1313 | || vr1.type == VR_VARYING |

1314 | || vr0.type != vr1.type |

1315 | || symbolic_range_p (&vr0) |

1316 | || symbolic_range_p (&vr1))) |

1317 | { |

1318 | set_value_range_to_varying (vr); |

1319 | return; |

1320 | } |

1321 | |

1322 | /* Now evaluate the expression to determine the new range. */ |

1323 | if (POINTER_TYPE_P (expr_type)) |

1324 | { |

1325 | if (code == MIN_EXPR || code == MAX_EXPR) |

1326 | { |

1327 | /* For MIN/MAX expressions with pointers, we only care about |

1328 | nullness, if both are non null, then the result is nonnull. |

1329 | If both are null, then the result is null. Otherwise they |

1330 | are varying. */ |

1331 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) |

1332 | set_value_range_to_nonnull (vr, expr_type); |

1333 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |

1334 | set_value_range_to_null (vr, expr_type); |

1335 | else |

1336 | set_value_range_to_varying (vr); |

1337 | } |

1338 | else if (code == POINTER_PLUS_EXPR) |

1339 | { |

1340 | /* For pointer types, we are really only interested in asserting |

1341 | whether the expression evaluates to non-NULL. */ |

1342 | if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) |

1343 | set_value_range_to_nonnull (vr, expr_type); |

1344 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |

1345 | set_value_range_to_null (vr, expr_type); |

1346 | else |

1347 | set_value_range_to_varying (vr); |

1348 | } |

1349 | else if (code == BIT_AND_EXPR) |

1350 | { |

1351 | /* For pointer types, we are really only interested in asserting |

1352 | whether the expression evaluates to non-NULL. */ |

1353 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) |

1354 | set_value_range_to_nonnull (vr, expr_type); |

1355 | else if (range_is_null (&vr0) || range_is_null (&vr1)) |

1356 | set_value_range_to_null (vr, expr_type); |

1357 | else |

1358 | set_value_range_to_varying (vr); |

1359 | } |

1360 | else |

1361 | set_value_range_to_varying (vr); |

1362 | |

1363 | return; |

1364 | } |

1365 | |

1366 | /* For integer ranges, apply the operation to each end of the |

1367 | range and see what we end up with. */ |

1368 | if (code == PLUS_EXPR || code == MINUS_EXPR) |

1369 | { |

1370 | const bool minus_p = (code == MINUS_EXPR); |

1371 | tree min_op0 = vr0.min; |

1372 | tree min_op1 = minus_p ? vr1.max : vr1.min; |

1373 | tree max_op0 = vr0.max; |

1374 | tree max_op1 = minus_p ? vr1.min : vr1.max; |

1375 | tree sym_min_op0 = NULL_TREE; |

1376 | tree sym_min_op1 = NULL_TREE; |

1377 | tree sym_max_op0 = NULL_TREE; |

1378 | tree sym_max_op1 = NULL_TREE; |

1379 | bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; |

1380 | |

1381 | /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or |

1382 | single-symbolic ranges, try to compute the precise resulting range, |

1383 | but only if we know that this resulting range will also be constant |

1384 | or single-symbolic. */ |

1385 | if (vr0.type == VR_RANGE && vr1.type == VR_RANGE |

1386 | && (TREE_CODE (min_op0) == INTEGER_CST |

1387 | || (sym_min_op0 |

1388 | = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) |

1389 | && (TREE_CODE (min_op1) == INTEGER_CST |

1390 | || (sym_min_op1 |

1391 | = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) |

1392 | && (!(sym_min_op0 && sym_min_op1) |

1393 | || (sym_min_op0 == sym_min_op1 |

1394 | && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) |

1395 | && (TREE_CODE (max_op0) == INTEGER_CST |

1396 | || (sym_max_op0 |

1397 | = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) |

1398 | && (TREE_CODE (max_op1) == INTEGER_CST |

1399 | || (sym_max_op1 |

1400 | = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) |

1401 | && (!(sym_max_op0 && sym_max_op1) |

1402 | || (sym_max_op0 == sym_max_op1 |

1403 | && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) |

1404 | { |

1405 | const signop sgn = TYPE_SIGN (expr_type); |

1406 | const unsigned int prec = TYPE_PRECISION (expr_type); |

1407 | wide_int type_min, type_max, wmin, wmax; |

1408 | int min_ovf = 0; |

1409 | int max_ovf = 0; |

1410 | |

1411 | /* Get the lower and upper bounds of the type. */ |

1412 | if (TYPE_OVERFLOW_WRAPS (expr_type)) |

1413 | { |

1414 | type_min = wi::min_value (prec, sgn); |

1415 | type_max = wi::max_value (prec, sgn); |

1416 | } |

1417 | else |

1418 | { |

1419 | type_min = wi::to_wide (vrp_val_min (expr_type)); |

1420 | type_max = wi::to_wide (vrp_val_max (expr_type)); |

1421 | } |

1422 | |

1423 | /* Combine the lower bounds, if any. */ |

1424 | if (min_op0 && min_op1) |

1425 | { |

1426 | if (minus_p) |

1427 | { |

1428 | wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1); |

1429 | |

1430 | /* Check for overflow. */ |

1431 | if (wi::cmp (0, wi::to_wide (min_op1), sgn) |

1432 | != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) |

1433 | min_ovf = wi::cmp (wi::to_wide (min_op0), |

1434 | wi::to_wide (min_op1), sgn); |

1435 | } |

1436 | else |

1437 | { |

1438 | wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1); |

1439 | |

1440 | /* Check for overflow. */ |

1441 | if (wi::cmp (wi::to_wide (min_op1), 0, sgn) |

1442 | != wi::cmp (wmin, wi::to_wide (min_op0), sgn)) |

1443 | min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn); |

1444 | } |

1445 | } |

1446 | else if (min_op0) |

1447 | wmin = wi::to_wide (min_op0); |

1448 | else if (min_op1) |

1449 | { |

1450 | if (minus_p) |

1451 | { |

1452 | wmin = -wi::to_wide (min_op1); |

1453 | |

1454 | /* Check for overflow. */ |

1455 | if (sgn == SIGNED |

1456 | && wi::neg_p (wi::to_wide (min_op1)) |

1457 | && wi::neg_p (wmin)) |

1458 | min_ovf = 1; |

1459 | else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0) |

1460 | min_ovf = -1; |

1461 | } |

1462 | else |

1463 | wmin = wi::to_wide (min_op1); |

1464 | } |

1465 | else |

1466 | wmin = wi::shwi (0, prec); |

1467 | |

1468 | /* Combine the upper bounds, if any. */ |

1469 | if (max_op0 && max_op1) |

1470 | { |

1471 | if (minus_p) |

1472 | { |

1473 | wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1); |

1474 | |

1475 | /* Check for overflow. */ |

1476 | if (wi::cmp (0, wi::to_wide (max_op1), sgn) |

1477 | != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) |

1478 | max_ovf = wi::cmp (wi::to_wide (max_op0), |

1479 | wi::to_wide (max_op1), sgn); |

1480 | } |

1481 | else |

1482 | { |

1483 | wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1); |

1484 | |

1485 | if (wi::cmp (wi::to_wide (max_op1), 0, sgn) |

1486 | != wi::cmp (wmax, wi::to_wide (max_op0), sgn)) |

1487 | max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn); |

1488 | } |

1489 | } |

1490 | else if (max_op0) |

1491 | wmax = wi::to_wide (max_op0); |

1492 | else if (max_op1) |

1493 | { |

1494 | if (minus_p) |

1495 | { |

1496 | wmax = -wi::to_wide (max_op1); |

1497 | |

1498 | /* Check for overflow. */ |

1499 | if (sgn == SIGNED |

1500 | && wi::neg_p (wi::to_wide (max_op1)) |

1501 | && wi::neg_p (wmax)) |

1502 | max_ovf = 1; |

1503 | else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0) |

1504 | max_ovf = -1; |

1505 | } |

1506 | else |

1507 | wmax = wi::to_wide (max_op1); |

1508 | } |

1509 | else |

1510 | wmax = wi::shwi (0, prec); |

1511 | |

1512 | /* Check for type overflow. */ |

1513 | if (min_ovf == 0) |

1514 | { |

1515 | if (wi::cmp (wmin, type_min, sgn) == -1) |

1516 | min_ovf = -1; |

1517 | else if (wi::cmp (wmin, type_max, sgn) == 1) |

1518 | min_ovf = 1; |

1519 | } |

1520 | if (max_ovf == 0) |

1521 | { |

1522 | if (wi::cmp (wmax, type_min, sgn) == -1) |

1523 | max_ovf = -1; |

1524 | else if (wi::cmp (wmax, type_max, sgn) == 1) |

1525 | max_ovf = 1; |

1526 | } |

1527 | |

1528 | /* If we have overflow for the constant part and the resulting |

1529 | range will be symbolic, drop to VR_VARYING. */ |

1530 | if ((min_ovf && sym_min_op0 != sym_min_op1) |

1531 | || (max_ovf && sym_max_op0 != sym_max_op1)) |

1532 | { |

1533 | set_value_range_to_varying (vr); |

1534 | return; |

1535 | } |

1536 | |

1537 | if (TYPE_OVERFLOW_WRAPS (expr_type)) |

1538 | { |

1539 | /* If overflow wraps, truncate the values and adjust the |

1540 | range kind and bounds appropriately. */ |

1541 | wide_int tmin = wide_int::from (wmin, prec, sgn); |

1542 | wide_int tmax = wide_int::from (wmax, prec, sgn); |

1543 | if (min_ovf == max_ovf) |

1544 | { |

1545 | /* No overflow or both overflow or underflow. The |

1546 | range kind stays VR_RANGE. */ |

1547 | min = wide_int_to_tree (expr_type, tmin); |

1548 | max = wide_int_to_tree (expr_type, tmax); |

1549 | } |

1550 | else if ((min_ovf == -1 && max_ovf == 0) |

1551 | || (max_ovf == 1 && min_ovf == 0)) |

1552 | { |

1553 | /* Min underflow or max overflow. The range kind |

1554 | changes to VR_ANTI_RANGE. */ |

1555 | bool covers = false; |

1556 | wide_int tem = tmin; |

1557 | type = VR_ANTI_RANGE; |

1558 | tmin = tmax + 1; |

1559 | if (wi::cmp (tmin, tmax, sgn) < 0) |

1560 | covers = true; |

1561 | tmax = tem - 1; |

1562 | if (wi::cmp (tmax, tem, sgn) > 0) |

1563 | covers = true; |

1564 | /* If the anti-range would cover nothing, drop to varying. |

1565 | Likewise if the anti-range bounds are outside of the |

1566 | types values. */ |

1567 | if (covers || wi::cmp (tmin, tmax, sgn) > 0) |

1568 | { |

1569 | set_value_range_to_varying (vr); |

1570 | return; |

1571 | } |

1572 | min = wide_int_to_tree (expr_type, tmin); |

1573 | max = wide_int_to_tree (expr_type, tmax); |

1574 | } |

1575 | else |

1576 | { |

1577 | /* Other underflow and/or overflow, drop to VR_VARYING. */ |

1578 | set_value_range_to_varying (vr); |

1579 | return; |

1580 | } |

1581 | } |

1582 | else |

1583 | { |

1584 | /* If overflow does not wrap, saturate to the types min/max |

1585 | value. */ |

1586 | if (min_ovf == -1) |

1587 | min = wide_int_to_tree (expr_type, type_min); |

1588 | else if (min_ovf == 1) |

1589 | min = wide_int_to_tree (expr_type, type_max); |

1590 | else |

1591 | min = wide_int_to_tree (expr_type, wmin); |

1592 | |

1593 | if (max_ovf == -1) |

1594 | max = wide_int_to_tree (expr_type, type_min); |

1595 | else if (max_ovf == 1) |

1596 | max = wide_int_to_tree (expr_type, type_max); |

1597 | else |

1598 | max = wide_int_to_tree (expr_type, wmax); |

1599 | } |

1600 | |

1601 | /* If the result lower bound is constant, we're done; |

1602 | otherwise, build the symbolic lower bound. */ |

1603 | if (sym_min_op0 == sym_min_op1) |

1604 | ; |

1605 | else if (sym_min_op0) |

1606 | min = build_symbolic_expr (expr_type, sym_min_op0, |

1607 | neg_min_op0, min); |

1608 | else if (sym_min_op1) |

1609 | { |

1610 | /* We may not negate if that might introduce |

1611 | undefined overflow. */ |

1612 | if (! minus_p |

1613 | || neg_min_op1 |

1614 | || TYPE_OVERFLOW_WRAPS (expr_type)) |

1615 | min = build_symbolic_expr (expr_type, sym_min_op1, |

1616 | neg_min_op1 ^ minus_p, min); |

1617 | else |

1618 | min = NULL_TREE; |

1619 | } |

1620 | |

1621 | /* Likewise for the upper bound. */ |

1622 | if (sym_max_op0 == sym_max_op1) |

1623 | ; |

1624 | else if (sym_max_op0) |

1625 | max = build_symbolic_expr (expr_type, sym_max_op0, |

1626 | neg_max_op0, max); |

1627 | else if (sym_max_op1) |

1628 | { |

1629 | /* We may not negate if that might introduce |

1630 | undefined overflow. */ |

1631 | if (! minus_p |

1632 | || neg_max_op1 |

1633 | || TYPE_OVERFLOW_WRAPS (expr_type)) |

1634 | max = build_symbolic_expr (expr_type, sym_max_op1, |

1635 | neg_max_op1 ^ minus_p, max); |

1636 | else |

1637 | max = NULL_TREE; |

1638 | } |

1639 | } |

1640 | else |

1641 | { |

1642 | /* For other cases, for example if we have a PLUS_EXPR with two |

1643 | VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort |

1644 | to compute a precise range for such a case. |

1645 | ??? In general, even mixed range kind operations can be expressed |

1646 | by, for example, transforming ~[3, 5] + [1, 2] into range-only |

1647 | operations and a union primitive: |

1648 | [-INF, 2] + [1, 2] U [5, +INF] + [1, 2] |

1649 | [-INF+1, 4] U [6, +INF(OVF)] |

1650 | though usually the union is not exactly representable with |

1651 | a single range or anti-range as the above is |

1652 | [-INF+1, +INF(OVF)] intersected with ~[5, 5] |

1653 | but one could use a scheme similar to equivalences for this. */ |

1654 | set_value_range_to_varying (vr); |

1655 | return; |

1656 | } |

1657 | } |

1658 | else if (code == MIN_EXPR |

1659 | || code == MAX_EXPR) |

1660 | { |

1661 | if (vr0.type == VR_RANGE |

1662 | && !symbolic_range_p (&vr0)) |

1663 | { |

1664 | type = VR_RANGE; |

1665 | if (vr1.type == VR_RANGE |

1666 | && !symbolic_range_p (&vr1)) |

1667 | { |

1668 | /* For operations that make the resulting range directly |

1669 | proportional to the original ranges, apply the operation to |

1670 | the same end of each range. */ |

1671 | min = int_const_binop (code, vr0.min, vr1.min); |

1672 | max = int_const_binop (code, vr0.max, vr1.max); |

1673 | } |

1674 | else if (code == MIN_EXPR) |

1675 | { |

1676 | min = vrp_val_min (expr_type); |

1677 | max = vr0.max; |

1678 | } |

1679 | else if (code == MAX_EXPR) |

1680 | { |

1681 | min = vr0.min; |

1682 | max = vrp_val_max (expr_type); |

1683 | } |

1684 | } |

1685 | else if (vr1.type == VR_RANGE |

1686 | && !symbolic_range_p (&vr1)) |

1687 | { |

1688 | type = VR_RANGE; |

1689 | if (code == MIN_EXPR) |

1690 | { |

1691 | min = vrp_val_min (expr_type); |

1692 | max = vr1.max; |

1693 | } |

1694 | else if (code == MAX_EXPR) |

1695 | { |

1696 | min = vr1.min; |

1697 | max = vrp_val_max (expr_type); |

1698 | } |

1699 | } |

1700 | else |

1701 | { |

1702 | set_value_range_to_varying (vr); |

1703 | return; |

1704 | } |

1705 | } |

1706 | else if (code == MULT_EXPR) |

1707 | { |

1708 | /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not |

1709 | drop to varying. This test requires 2*prec bits if both |

1710 | operands are signed and 2*prec + 2 bits if either is not. */ |

1711 | |

1712 | signop sign = TYPE_SIGN (expr_type); |

1713 | unsigned int prec = TYPE_PRECISION (expr_type); |

1714 | |

1715 | if (!range_int_cst_p (&vr0) |

1716 | || !range_int_cst_p (&vr1)) |

1717 | { |

1718 | set_value_range_to_varying (vr); |

1719 | return; |

1720 | } |

1721 | |

1722 | if (TYPE_OVERFLOW_WRAPS (expr_type)) |

1723 | { |

1724 | typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; |

1725 | typedef generic_wide_int |

1726 | <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; |

1727 | vrp_int sizem1 = wi::mask <vrp_int> (prec, false); |

1728 | vrp_int size = sizem1 + 1; |

1729 | |

1730 | /* Extend the values using the sign of the result to PREC2. |

1731 | From here on out, everything is just signed math no matter |

1732 | what the input types were. */ |

1733 | vrp_int min0 = vrp_int_cst (vr0.min); |

1734 | vrp_int max0 = vrp_int_cst (vr0.max); |

1735 | vrp_int min1 = vrp_int_cst (vr1.min); |

1736 | vrp_int max1 = vrp_int_cst (vr1.max); |

1737 | /* Canonicalize the intervals. */ |
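/* E.g. an unsigned 8-bit range [254, 255] has min0 + max0 = 509,
   which exceeds the type size 256, so both bounds are shifted down
   to the signed equivalent [-2, -1] before the products below are
   formed in the wider precision.  */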

1738 | if (sign == UNSIGNED) |

1739 | { |

1740 | if (wi::ltu_p (size, min0 + max0)) |

1741 | { |

1742 | min0 -= size; |

1743 | max0 -= size; |

1744 | } |

1745 | |

1746 | if (wi::ltu_p (size, min1 + max1)) |

1747 | { |

1748 | min1 -= size; |

1749 | max1 -= size; |

1750 | } |

1751 | } |

1752 | |

1753 | vrp_int prod0 = min0 * min1; |

1754 | vrp_int prod1 = min0 * max1; |

1755 | vrp_int prod2 = max0 * min1; |

1756 | vrp_int prod3 = max0 * max1; |

1757 | |

1758 | /* Sort the 4 products so that min is in prod0 and max is in |

1759 | prod3. */ |

1760 | /* min0min1 > max0max1 */ |

1761 | if (prod0 > prod3) |

1762 | std::swap (prod0, prod3); |

1763 | |

1764 | /* min0max1 > max0min1 */ |

1765 | if (prod1 > prod2) |

1766 | std::swap (prod1, prod2); |

1767 | |

1768 | if (prod0 > prod1) |

1769 | std::swap (prod0, prod1); |

1770 | |

1771 | if (prod2 > prod3) |

1772 | std::swap (prod2, prod3); |

1773 | |

1774 | /* diff = max - min. */ |

1775 | prod2 = prod3 - prod0; |

1776 | if (wi::geu_p (prod2, sizem1)) |

1777 | { |

1778 | /* the range covers all values. */ |

1779 | set_value_range_to_varying (vr); |

1780 | return; |

1781 | } |

1782 | |

1783 | /* The following should handle the wrapping and selecting |

1784 | VR_ANTI_RANGE for us. */ |

1785 | min = wide_int_to_tree (expr_type, prod0); |

1786 | max = wide_int_to_tree (expr_type, prod3); |

1787 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |

1788 | return; |

1789 | } |

1790 | |

1791 | /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |

1792 | drop to VR_VARYING. It would take more effort to compute a |

1793 | precise range for such a case. For example, if we have |

1794 | op0 == 65536 and op1 == 65536 with their ranges both being |

1795 | ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so |

1796 | we cannot claim that the product is in ~[0,0]. Note that we |

1797 | are guaranteed to have vr0.type == vr1.type at this |

1798 | point. */ |

1799 | if (vr0.type == VR_ANTI_RANGE |

1800 | && !TYPE_OVERFLOW_UNDEFINED (expr_type)) |

1801 | { |

1802 | set_value_range_to_varying (vr); |

1803 | return; |

1804 | } |

1805 | |

1806 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |

1807 | return; |

1808 | } |

1809 | else if (code == RSHIFT_EXPR |

1810 | || code == LSHIFT_EXPR) |

1811 | { |

1812 | /* If we have a shift with any shift count values outside [0..prec-1], |

1813 | then drop to VR_VARYING. Outside of this range we get undefined |

1814 | behavior from the shift operation. We cannot even trust |

1815 | SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |

1816 | shifts, and the operation at the tree level may be widened. */ |

1817 | if (range_int_cst_p (&vr1) |

1818 | && compare_tree_int (vr1.min, 0) >= 0 |

1819 | && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) |

1820 | { |

1821 | if (code == RSHIFT_EXPR) |

1822 | { |

1823 | /* Even if vr0 is VARYING or otherwise not usable, we can derive |

1824 | useful ranges just from the shift count. E.g. |

1825 | x >> 63 for signed 64-bit x is always [-1, 0]. */ |

1826 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) |

1827 | { |

1828 | vr0.type = type = VR_RANGE; |

1829 | vr0.min = vrp_val_min (expr_type); |

1830 | vr0.max = vrp_val_max (expr_type); |

1831 | } |

1832 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |

1833 | return; |

1834 | } |

1835 | /* We can map lshifts by constants to MULT_EXPR handling. */ |
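/* E.g. a constant shift count of 3 becomes the multiplier
   singleton [1 << 3, 1 << 3] = [8, 8] below.  */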

1836 | else if (code == LSHIFT_EXPR |

1837 | && range_int_cst_singleton_p (&vr1)) |

1838 | { |

1839 | bool saved_flag_wrapv; |

1840 | value_range vr1p = VR_INITIALIZER; |

1841 | vr1p.type = VR_RANGE; |

1842 | vr1p.min = (wide_int_to_tree |

1843 | (expr_type, |

1844 | wi::set_bit_in_zero (tree_to_shwi (vr1.min), |

1845 | TYPE_PRECISION (expr_type)))); |

1846 | vr1p.max = vr1p.min; |

1847 | /* We have to use a wrapping multiply though as signed overflow |

1848 | on lshifts is implementation defined in C89. */ |

1849 | saved_flag_wrapv = flag_wrapv; |

1850 | flag_wrapv = 1; |

1851 | extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, |

1852 | &vr0, &vr1p); |

1853 | flag_wrapv = saved_flag_wrapv; |

1854 | return; |

1855 | } |

1856 | else if (code == LSHIFT_EXPR |

1857 | && range_int_cst_p (&vr0)) |

1858 | { |

1859 | int prec = TYPE_PRECISION (expr_type); |

1860 | int overflow_pos = prec; |

1861 | int bound_shift; |

1862 | wide_int low_bound, high_bound; |

1863 | bool uns = TYPE_UNSIGNED (expr_type); |

1864 | bool in_bounds = false; |

1865 | |

1866 | if (!uns) |

1867 | overflow_pos -= 1; |

1868 | |

1869 | bound_shift = overflow_pos - tree_to_shwi (vr1.max); |

1870 | /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can |

1871 | overflow. However, for that to happen, vr1.max needs to be |

1872 | zero, which means vr1 is a singleton range of zero, which |

1873 | means it should be handled by the previous LSHIFT_EXPR |

1874 | if-clause. */ |

1875 | wide_int bound = wi::set_bit_in_zero (bound_shift, prec); |

1876 | wide_int complement = ~(bound - 1); |

1877 | |

1878 | if (uns) |

1879 | { |

1880 | low_bound = bound; |

1881 | high_bound = complement; |

1882 | if (wi::ltu_p (wi::to_wide (vr0.max), low_bound)) |

1883 | { |

1884 | /* [5, 6] << [1, 2] == [10, 24]. */ |

1885 | /* We're shifting out only zeroes, the value increases |

1886 | monotonically. */ |

1887 | in_bounds = true; |

1888 | } |

1889 | else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min))) |

1890 | { |

1891 | /* [0xffffff00, 0xffffffff] << [1, 2] |

1892 | == [0xfffffc00, 0xfffffffe]. */ |

1893 | /* We're shifting out only ones, the value decreases |

1894 | monotonically. */ |

1895 | in_bounds = true; |

1896 | } |

1897 | } |

1898 | else |

1899 | { |

1900 | /* [-1, 1] << [1, 2] == [-4, 4]. */ |

1901 | low_bound = complement; |

1902 | high_bound = bound; |

1903 | if (wi::lts_p (wi::to_wide (vr0.max), high_bound) |

1904 | && wi::lts_p (low_bound, wi::to_wide (vr0.min))) |

1905 | { |

1906 | /* For non-negative numbers, we're shifting out only |

1907 | zeroes, the value increases monotonically. |

1908 | For negative numbers, we're shifting out only ones, the |

1909 | value decreases monotonically. */ |

1910 | in_bounds = true; |

1911 | } |

1912 | } |

1913 | |

1914 | if (in_bounds) |

1915 | { |

1916 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |

1917 | return; |

1918 | } |

1919 | } |

1920 | } |

1921 | set_value_range_to_varying (vr); |

1922 | return; |

1923 | } |

1924 | else if (code == TRUNC_DIV_EXPR |

1925 | || code == FLOOR_DIV_EXPR |

1926 | || code == CEIL_DIV_EXPR |

1927 | || code == EXACT_DIV_EXPR |

1928 | || code == ROUND_DIV_EXPR) |

1929 | { |

1930 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) |

1931 | { |

1932 | /* For division, if op1 has VR_RANGE but op0 does not, something |

1933 | can be deduced just from that range. Say [min, max] / [4, max] |

1934 | gives [min / 4, max / 4] range. */ |

1935 | if (vr1.type == VR_RANGE |

1936 | && !symbolic_range_p (&vr1) |

1937 | && range_includes_zero_p (vr1.min, vr1.max) == 0) |

1938 | { |

1939 | vr0.type = type = VR_RANGE; |

1940 | vr0.min = vrp_val_min (expr_type); |

1941 | vr0.max = vrp_val_max (expr_type); |

1942 | } |

1943 | else |

1944 | { |

1945 | set_value_range_to_varying (vr); |

1946 | return; |

1947 | } |

1948 | } |

1949 | |

1950 | /* For divisions, if flag_non_call_exceptions is true, we must |

1951 | not eliminate a division by zero. */ |

1952 | if (cfun->can_throw_non_call_exceptions |

1953 | && (vr1.type != VR_RANGE |

1954 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |

1955 | { |

1956 | set_value_range_to_varying (vr); |

1957 | return; |

1958 | } |

1959 | |

1960 | /* For divisions, if op0 is VR_RANGE, we can deduce a range |

1961 | even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can |

1962 | include 0. */ |

1963 | if (vr0.type == VR_RANGE |

1964 | && (vr1.type != VR_RANGE |

1965 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |

1966 | { |

1967 | tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); |

1968 | int cmp; |

1969 | |

1970 | min = NULL_TREE; |

1971 | max = NULL_TREE; |

1972 | if (TYPE_UNSIGNED (expr_type) |

1973 | || value_range_nonnegative_p (&vr1)) |

1974 | { |

1975 | /* For unsigned division or when divisor is known |

1976 | to be non-negative, the range has to cover |

1977 | all numbers from 0 to max for positive max |

1978 | and all numbers from min to 0 for negative min. */ |
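/* E.g. an unsigned [5, 100] divided by a completely unknown divisor
   still gives [0, 100]: dividing cannot increase the value, but it
   can reach 0.  */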

1979 | cmp = compare_values (vr0.max, zero); |

1980 | if (cmp == -1) |

1981 | { |

1982 | /* When vr0.max < 0, vr1.min != 0 and value |

1983 | ranges for dividend and divisor are available. */ |

1984 | if (vr1.type == VR_RANGE |

1985 | && !symbolic_range_p (&vr0) |

1986 | && !symbolic_range_p (&vr1) |

1987 | && compare_values (vr1.min, zero) != 0) |

1988 | max = int_const_binop (code, vr0.max, vr1.min); |

1989 | else |

1990 | max = zero; |

1991 | } |

1992 | else if (cmp == 0 || cmp == 1) |

1993 | max = vr0.max; |

1994 | else |

1995 | type = VR_VARYING; |

1996 | cmp = compare_values (vr0.min, zero); |

1997 | if (cmp == 1) |

1998 | { |

1999 | /* For unsigned division when value ranges for dividend |

2000 | and divisor are available. */ |

2001 | if (vr1.type == VR_RANGE |

2002 | && !symbolic_range_p (&vr0) |

2003 | && !symbolic_range_p (&vr1) |

2004 | && compare_values (vr1.max, zero) != 0) |

2005 | min = int_const_binop (code, vr0.min, vr1.max); |

2006 | else |

2007 | min = zero; |

2008 | } |

2009 | else if (cmp == 0 || cmp == -1) |

2010 | min = vr0.min; |

2011 | else |

2012 | type = VR_VARYING; |

2013 | } |

2014 | else |

2015 | { |

2016 | /* Otherwise the range is -max .. max or min .. -min |

2017 | depending on which bound is bigger in absolute value, |

2018 | as the division can change the sign. */ |

2019 | abs_extent_range (vr, vr0.min, vr0.max); |

2020 | return; |

2021 | } |

2022 | if (type == VR_VARYING) |

2023 | { |

2024 | set_value_range_to_varying (vr); |

2025 | return; |

2026 | } |

2027 | } |

2028 | else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1)) |

2029 | { |

2030 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |

2031 | return; |

2032 | } |

2033 | } |

2034 | else if (code == TRUNC_MOD_EXPR) |

2035 | { |

2036 | if (range_is_null (&vr1)) |

2037 | { |

2038 | set_value_range_to_undefined (vr); |

2039 | return; |

2040 | } |

2041 | /* ABS (A % B) < ABS (B) and either |

2042 | 0 <= A % B <= A or A <= A % B <= 0. */ |
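/* E.g. for signed A in [-7, 20] and B in [-5, 8], |A % B| is at most
   |B| - 1 <= 7 and the result has the sign of A, so the code below
   computes the range [-7, 7].  */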

2043 | type = VR_RANGE; |

2044 | signop sgn = TYPE_SIGN (expr_type); |

2045 | unsigned int prec = TYPE_PRECISION (expr_type); |

2046 | wide_int wmin, wmax, tmp; |

2047 | if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1)) |

2048 | { |

2049 | wmax = wi::to_wide (vr1.max) - 1; |

2050 | if (sgn == SIGNED) |

2051 | { |

2052 | tmp = -1 - wi::to_wide (vr1.min); |

2053 | wmax = wi::smax (wmax, tmp); |

2054 | } |

2055 | } |

2056 | else |

2057 | { |

2058 | wmax = wi::max_value (prec, sgn); |

2059 | /* X % INT_MIN may be INT_MAX. */ |

2060 | if (sgn == UNSIGNED) |

2061 | wmax = wmax - 1; |

2062 | } |

2063 | |

2064 | if (sgn == UNSIGNED) |

2065 | wmin = wi::zero (prec); |

2066 | else |

2067 | { |

2068 | wmin = -wmax; |

2069 | if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST) |

2070 | { |

2071 | tmp = wi::to_wide (vr0.min); |

2072 | if (wi::gts_p (tmp, 0)) |

2073 | tmp = wi::zero (prec); |

2074 | wmin = wi::smax (wmin, tmp); |

2075 | } |

2076 | } |

2077 | |

2078 | if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST) |

2079 | { |

2080 | tmp = wi::to_wide (vr0.max); |

2081 | if (sgn == SIGNED && wi::neg_p (tmp)) |

2082 | tmp = wi::zero (prec); |

2083 | wmax = wi::min (wmax, tmp, sgn); |

2084 | } |

2085 | |

2086 | min = wide_int_to_tree (expr_type, wmin); |

2087 | max = wide_int_to_tree (expr_type, wmax); |

2088 | } |

2089 | else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |

2090 | { |

2091 | bool int_cst_range0, int_cst_range1; |

2092 | wide_int may_be_nonzero0, may_be_nonzero1; |

2093 | wide_int must_be_nonzero0, must_be_nonzero1; |

2094 | |

2095 | int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, |

2096 | &may_be_nonzero0, |

2097 | &must_be_nonzero0); |

2098 | int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, |

2099 | &may_be_nonzero1, |

2100 | &must_be_nonzero1); |

2101 | |

2102 | if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR) |

2103 | { |

2104 | value_range *vr0p = NULL, *vr1p = NULL; |

2105 | if (range_int_cst_singleton_p (&vr1)) |

2106 | { |

2107 | vr0p = &vr0; |

2108 | vr1p = &vr1; |

2109 | } |

2110 | else if (range_int_cst_singleton_p (&vr0)) |

2111 | { |

2112 | vr0p = &vr1; |

2113 | vr1p = &vr0; |

2114 | } |

2115 | /* For op & or | attempt to optimize: |

2116 | [x, y] op z into [x op z, y op z] |

2117 | if z is a constant which (for op | its bitwise not) has n |

2118 | consecutive least significant bits cleared followed by m |

2119 | consecutive 1 bits set immediately above it, and either |

2120 | m + n == precision, or (x >> (m + n)) == (y >> (m + n)). |

2121 | The least significant n bits of all the values in the range are |

2122 | cleared or set, the m bits above it are preserved and any bits |

2123 | above these are required to be the same for all values in the |

2124 | range. */ |
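/* For instance, [8, 11] | 4 qualifies with n = 0, m = 2 and
   8 >> 2 == 11 >> 2, so the result below is [8 | 4, 11 | 4]
   = [12, 15].  */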

2125 | if (vr0p && range_int_cst_p (vr0p)) |

2126 | { |

2127 | wide_int w = wi::to_wide (vr1p->min); |

2128 | int m = 0, n = 0; |

2129 | if (code == BIT_IOR_EXPR) |

2130 | w = ~w; |

2131 | if (wi::eq_p (w, 0)) |

2132 | n = TYPE_PRECISION (expr_type); |

2133 | else |

2134 | { |

2135 | n = wi::ctz (w); |

2136 | w = ~(w | wi::mask (n, false, w.get_precision ())); |

2137 | if (wi::eq_p (w, 0)) |

2138 | m = TYPE_PRECISION (expr_type) - n; |

2139 | else |

2140 | m = wi::ctz (w) - n; |

2141 | } |

2142 | wide_int mask = wi::mask (m + n, true, w.get_precision ()); |

2143 | if ((mask & wi::to_wide (vr0p->min)) |

2144 | == (mask & wi::to_wide (vr0p->max))) |

2145 | { |

2146 | min = int_const_binop (code, vr0p->min, vr1p->min); |

2147 | max = int_const_binop (code, vr0p->max, vr1p->min); |

2148 | } |

2149 | } |

2150 | } |

2151 | |

2152 | type = VR_RANGE; |

2153 | if (min && max) |

2154 | /* Optimized above already. */; |

2155 | else if (code == BIT_AND_EXPR) |

2156 | { |

2157 | min = wide_int_to_tree (expr_type, |

2158 | must_be_nonzero0 & must_be_nonzero1); |

2159 | wide_int wmax = may_be_nonzero0 & may_be_nonzero1; |

2160 | /* If both input ranges contain only negative values we can |

2161 | truncate the result range maximum to the minimum of the |

2162 | input range maxima. */ |

2163 | if (int_cst_range0 && int_cst_range1 |

2164 | && tree_int_cst_sgn (vr0.max) < 0 |

2165 | && tree_int_cst_sgn (vr1.max) < 0) |

2166 | { |

2167 | wmax = wi::min (wmax, wi::to_wide (vr0.max), |

2168 | TYPE_SIGN (expr_type)); |

2169 | wmax = wi::min (wmax, wi::to_wide (vr1.max), |

2170 | TYPE_SIGN (expr_type)); |

2171 | } |

2172 | /* If either input range contains only non-negative values |

2173 | we can truncate the result range maximum to the respective |

2174 | maximum of the input range. */ |

2175 | if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) |

2176 | wmax = wi::min (wmax, wi::to_wide (vr0.max), |

2177 | TYPE_SIGN (expr_type)); |

2178 | if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) |

2179 | wmax = wi::min (wmax, wi::to_wide (vr1.max), |

2180 | TYPE_SIGN (expr_type)); |

2181 | max = wide_int_to_tree (expr_type, wmax); |

2182 | cmp = compare_values (min, max); |

2183 | /* PR68217: In the case of signed & sign-bit-CST, the result |

2184 | should be [-INF, 0] instead of [-INF, INF]. */ |

2185 | if (cmp == -2 || cmp == 1) |

2186 | { |

2187 | wide_int sign_bit |

2188 | = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1, |

2189 | TYPE_PRECISION (expr_type)); |

2190 | if (!TYPE_UNSIGNED (expr_type) |

2191 | && ((int_cst_range0 |

2192 | && value_range_constant_singleton (&vr0) |

2193 | && !wi::cmps (wi::to_wide (vr0.min), sign_bit)) |

2194 | || (int_cst_range1 |

2195 | && value_range_constant_singleton (&vr1) |

2196 | && !wi::cmps (wi::to_wide (vr1.min), sign_bit)))) |

2197 | { |

2198 | min = TYPE_MIN_VALUE (expr_type); |

2199 | max = build_int_cst (expr_type, 0); |

2200 | } |

2201 | } |

2202 | } |

2203 | else if (code == BIT_IOR_EXPR) |

2204 | { |

2205 | max = wide_int_to_tree (expr_type, |

2206 | may_be_nonzero0 | may_be_nonzero1); |

2207 | wide_int wmin = must_be_nonzero0 | must_be_nonzero1; |

2208 | /* If both input ranges contain only non-negative values we can |

2209 | truncate the minimum of the result range to the maximum |

2210 | of the input range minima. */ |

2211 | if (int_cst_range0 && int_cst_range1 |

2212 | && tree_int_cst_sgn (vr0.min) >= 0 |

2213 | && tree_int_cst_sgn (vr1.min) >= 0) |

2214 | { |

2215 | wmin = wi::max (wmin, wi::to_wide (vr0.min), |

2216 | TYPE_SIGN (expr_type)); |

2217 | wmin = wi::max (wmin, wi::to_wide (vr1.min), |

2218 | TYPE_SIGN (expr_type)); |

2219 | } |

2220 | /* If either input range contains only negative values |

2221 | we can truncate the minimum of the result range to the |

2222 | respective input range minimum. */ |

2223 | if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) |

2224 | wmin = wi::max (wmin, wi::to_wide (vr0.min), |

2225 | TYPE_SIGN (expr_type)); |

2226 | if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) |

2227 | wmin = wi::max (wmin, wi::to_wide (vr1.min), |

2228 | TYPE_SIGN (expr_type)); |

2229 | min = wide_int_to_tree (expr_type, wmin); |

2230 | } |

2231 | else if (code == BIT_XOR_EXPR) |

2232 | { |

2233 | wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) |

2234 | | ~(may_be_nonzero0 | may_be_nonzero1)); |

2235 | wide_int result_one_bits |

2236 | = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1) |

2237 | | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0)); |

2238 | max = wide_int_to_tree (expr_type, ~result_zero_bits); |

2239 | min = wide_int_to_tree (expr_type, result_one_bits); |

2240 | /* If the range has all positive or all negative values the |

2241 | result is better than VARYING. */ |

2242 | if (tree_int_cst_sgn (min) < 0 |

2243 | || tree_int_cst_sgn (max) >= 0) |

2244 | ; |

2245 | else |

2246 | max = min = NULL_TREE; |

2247 | } |

2248 | } |

2249 | else |

2250 | gcc_unreachable (); |

2251 | |

2252 | /* If either MIN or MAX overflowed, then set the resulting range to |

2253 | VARYING. */ |

2254 | if (min == NULL_TREE |

2255 | || TREE_OVERFLOW_P (min) |

2256 | || max == NULL_TREE |

2257 | || TREE_OVERFLOW_P (max)) |

2258 | { |

2259 | set_value_range_to_varying (vr); |

2260 | return; |

2261 | } |

2262 | |

2263 | /* We punt for [-INF, +INF]. |

2264 | We learn nothing when we have INF on both sides. |

2265 | Note that we do accept [-INF, -INF] and [+INF, +INF]. */ |

2266 | if (vrp_val_is_min (min) && vrp_val_is_max (max)) |

2267 | { |

2268 | set_value_range_to_varying (vr); |

2269 | return; |

2270 | } |

2271 | |

2272 | cmp = compare_values (min, max); |

2273 | if (cmp == -2 || cmp == 1) |

2274 | { |

2275 | /* If the new range has its limits swapped around (MIN > MAX), |

2276 | then the operation caused one of them to wrap around, mark |

2277 | the new range VARYING. */ |

2278 | set_value_range_to_varying (vr); |

2279 | } |

2280 | else |

2281 | set_value_range (vr, type, min, max, NULL); |

2282 | } |

2283 | |

2284 | /* Extract range information from a unary operation CODE based on |

2285 | the range of its operand *VR0, which has type OP0_TYPE, with resulting type TYPE. |

2286 | The resulting range is stored in *VR. */ |

2287 | |

2288 | void |

2289 | extract_range_from_unary_expr (value_range *vr, |

2290 | enum tree_code code, tree type, |

2291 | value_range *vr0_, tree op0_type) |

2292 | { |

2293 | value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |

2294 | |

2295 | /* VRP only operates on integral and pointer types. */ |

2296 | if (!(INTEGRAL_TYPE_P (op0_type) |

2297 | || POINTER_TYPE_P (op0_type)) |

2298 | || !(INTEGRAL_TYPE_P (type) |

2299 | || POINTER_TYPE_P (type))) |

2300 | { |

2301 | set_value_range_to_varying (vr); |

2302 | return; |

2303 | } |

2304 | |

2305 | /* If VR0 is UNDEFINED, so is the result. */ |

2306 | if (vr0.type == VR_UNDEFINED) |

2307 | { |

2308 | set_value_range_to_undefined (vr); |

2309 | return; |

2310 | } |

2311 | |

2312 | /* Handle operations that we express in terms of others. */ |

2313 | if (code == PAREN_EXPR || code == OBJ_TYPE_REF) |

2314 | { |

2315 | /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ |

2316 | copy_value_range (vr, &vr0); |

2317 | return; |

2318 | } |

2319 | else if (code == NEGATE_EXPR) |

2320 | { |

2321 | /* -X is simply 0 - X, so re-use existing code that also handles |

2322 | anti-ranges fine. */ |

2323 | value_range zero = VR_INITIALIZER; |

2324 | set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); |

2325 | extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); |

2326 | return; |

2327 | } |

2328 | else if (code == BIT_NOT_EXPR) |

2329 | { |

2330 | /* ~X is simply -1 - X, so re-use existing code that also handles |

2331 | anti-ranges fine. */ |

2332 | value_range minusone = VR_INITIALIZER; |

2333 | set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); |

2334 | extract_range_from_binary_expr_1 (vr, MINUS_EXPR, |

2335 | type, &minusone, &vr0); |

2336 | return; |

2337 | } |

2338 | |

2339 | /* Now canonicalize anti-ranges to ranges when they are not symbolic |

2340 | and express op ~[] as (op []') U (op []''). */ |
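/* E.g. ~[3, 5] is split into [-INF, 2] and [6, +INF]; the unary
   operation is applied to each piece and the two results are
   joined again with vrp_meet.  */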

2341 | if (vr0.type == VR_ANTI_RANGE |

2342 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) |

2343 | { |

2344 | extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type); |

2345 | if (vrtem1.type != VR_UNDEFINED) |

2346 | { |

2347 | value_range vrres = VR_INITIALIZER; |

2348 | extract_range_from_unary_expr (&vrres, code, type, |

2349 | &vrtem1, op0_type); |

2350 | vrp_meet (vr, &vrres); |

2351 | } |

2352 | return; |

2353 | } |

2354 | |

2355 | if (CONVERT_EXPR_CODE_P (code)) |

2356 | { |

2357 | tree inner_type = op0_type; |

2358 | tree outer_type = type; |

2359 | |

2360 | /* If the expression evaluates to a pointer, we are only interested in |

2361 | determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ |

2362 | if (POINTER_TYPE_P (type)) |

2363 | { |

2364 | if (range_is_nonnull (&vr0)) |

2365 | set_value_range_to_nonnull (vr, type); |

2366 | else if (range_is_null (&vr0)) |

2367 | set_value_range_to_null (vr, type); |

2368 | else |

2369 | set_value_range_to_varying (vr); |

2370 | return; |

2371 | } |

2372 | |

2373 | /* If VR0 is varying and we increase the type precision, assume |

2374 | a full range for the following transformation. */ |

2375 | if (vr0.type == VR_VARYING |

2376 | && INTEGRAL_TYPE_P (inner_type) |

2377 | && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) |

2378 | { |

2379 | vr0.type = VR_RANGE; |

2380 | vr0.min = TYPE_MIN_VALUE (inner_type); |

2381 | vr0.max = TYPE_MAX_VALUE (inner_type); |

2382 | } |

2383 | |

2384 | /* If VR0 is a constant range or anti-range and the conversion is |

2385 | not truncating we can convert the min and max values and |

2386 | canonicalize the resulting range. Otherwise we can do the |

2387 | conversion if the size of the range is less than what the |

2388 | precision of the target type can represent and the range is |

2389 | not an anti-range. */ |
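/* E.g. truncating a 32-bit [1000, 1010] to an 8-bit unsigned type
   is fine because the span (10) fits in 8 bits: the bounds wrap to
   [232, 242], and set_and_canonicalize_value_range deals with the
   case where the wrapped minimum ends up above the maximum.  */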

2390 | if ((vr0.type == VR_RANGE |

2391 | || vr0.type == VR_ANTI_RANGE) |

2392 | && TREE_CODE (vr0.min) == INTEGER_CST |

2393 | && TREE_CODE (vr0.max) == INTEGER_CST |

2394 | && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) |

2395 | || (vr0.type == VR_RANGE |

2396 | && integer_zerop (int_const_binop (RSHIFT_EXPR, |

2397 | int_const_binop (MINUS_EXPR, vr0.max, vr0.min), |

2398 | size_int (TYPE_PRECISION (outer_type))))))) |

2399 | { |

2400 | tree new_min, new_max; |

2401 | new_min = force_fit_type (outer_type, wi::to_widest (vr0.min), |

2402 | 0, false); |

2403 | new_max = force_fit_type (outer_type, wi::to_widest (vr0.max), |

2404 | 0, false); |

2405 | set_and_canonicalize_value_range (vr, vr0.type, |

2406 | new_min, new_max, NULL); |

2407 | return; |

2408 | } |

2409 | |

2410 | set_value_range_to_varying (vr); |

2411 | return; |

2412 | } |

2413 | else if (code == ABS_EXPR) |

2414 | { |

2415 | tree min, max; |

2416 | int cmp; |

2417 | |

2418 | /* Pass through vr0 in the easy cases. */ |

2419 | if (TYPE_UNSIGNED (type) |

2420 | || value_range_nonnegative_p (&vr0)) |

2421 | { |

2422 | copy_value_range (vr, &vr0); |

2423 | return; |

2424 | } |

2425 | |

2426 | /* For the remaining varying or symbolic ranges we can't do anything |

2427 | useful. */ |

2428 | if (vr0.type == VR_VARYING |

2429 | || symbolic_range_p (&vr0)) |

2430 | { |

2431 | set_value_range_to_varying (vr); |

2432 | return; |

2433 | } |

2434 | |

2435 | /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a |

2436 | useful range. */ |

2437 | if (!TYPE_OVERFLOW_UNDEFINED (type) |

2438 | && ((vr0.type == VR_RANGE |

2439 | && vrp_val_is_min (vr0.min)) |

2440 | || (vr0.type == VR_ANTI_RANGE |

2441 | && !vrp_val_is_min (vr0.min)))) |

2442 | { |

2443 | set_value_range_to_varying (vr); |

2444 | return; |

2445 | } |

2446 | |

2447 | /* ABS_EXPR may flip the range around, if the original range |

2448 | included negative values. */ |

2449 | if (!vrp_val_is_min (vr0.min)) |

2450 | min = fold_unary_to_constant (code, type, vr0.min); |

2451 | else |

2452 | min = TYPE_MAX_VALUE (type); |

2453 | |

2454 | if (!vrp_val_is_min (vr0.max)) |

2455 | max = fold_unary_to_constant (code, type, vr0.max); |

2456 | else |

2457 | max = TYPE_MAX_VALUE (type); |

2458 | |

2459 | cmp = compare_values (min, max); |

2460 | |

2461 | /* If a VR_ANTI_RANGE contains zero, then we have |

2462 | ~[-INF, min(MIN, MAX)]. */ |

2463 | if (vr0.type == VR_ANTI_RANGE) |

2464 | { |

2465 | if (range_includes_zero_p (vr0.min, vr0.max) == 1) |

2466 | { |

2467 | /* Take the lower of the two values. */ |

2468 | if (cmp != 1) |

2469 | max = min; |

2470 | |

2471 | /* Create ~[-INF, min (abs(MIN), abs(MAX))] |

2472 | or ~[-INF + 1, min (abs(MIN), abs(MAX))] when |

2473 | flag_wrapv is set and the original anti-range doesn't include |

2474 | TYPE_MIN_VALUE; remember that -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ |

2475 | if (TYPE_OVERFLOW_WRAPS (type)) |

2476 | { |

2477 | tree type_min_value = TYPE_MIN_VALUE (type); |

2478 | |

2479 | min = (vr0.min != type_min_value |

2480 | ? int_const_binop (PLUS_EXPR, type_min_value, |

2481 | build_int_cst (TREE_TYPE (type_min_value), 1)) |

2482 | : type_min_value); |

2483 | } |

2484 | else |

2485 | min = TYPE_MIN_VALUE (type); |

2486 | } |

2487 | else |

2488 | { |

2489 | /* All else has failed, so create the range [0, INF], even for |

2490 | flag_wrapv since TYPE_MIN_VALUE is in the original |

2491 | anti-range. */ |

2492 | vr0.type = VR_RANGE; |

2493 | min = build_int_cst (type, 0); |

2494 | max = TYPE_MAX_VALUE (type); |

2495 | } |

2496 | } |

2497 | |

2498 | /* If the range contains zero then we know that the minimum value in the |

2499 | range will be zero. */ |

2500 | else if (range_includes_zero_p (vr0.min, vr0.max) == 1) |

2501 | { |

2502 | if (cmp == 1) |

2503 | max = min; |

2504 | min = build_int_cst (type, 0); |

2505 | } |

2506 | else |

2507 | { |

2508 | /* If the range was reversed, swap MIN and MAX. */ |

2509 | if (cmp == 1) |

2510 | std::swap (min, max); |

2511 | } |

2512 | |

2513 | cmp = compare_values (min, max); |

2514 | if (cmp == -2 || cmp == 1) |

2515 | { |

2516 | /* If the new range has its limits swapped around (MIN > MAX), |

2517 | then the operation caused one of them to wrap around, mark |

2518 | the new range VARYING. */ |

2519 | set_value_range_to_varying (vr); |

2520 | } |

2521 | else |

2522 | set_value_range (vr, vr0.type, min, max, NULL); |

2523 | return; |

2524 | } |

2525 | |

2526 | /* For unhandled operations fall back to varying. */ |

2527 | set_value_range_to_varying (vr); |

2528 | return; |

2529 | } |

2530 | |

2531 | /* Debugging dumps. */ |

2532 | |

2533 | void dump_value_range (FILE *, const value_range *); |

2534 | void debug_value_range (value_range *); |

2535 | void dump_all_value_ranges (FILE *); |

2536 | void dump_vr_equiv (FILE *, bitmap); |

2537 | void debug_vr_equiv (bitmap); |

2538 | |

2539 | |

2540 | /* Dump value range VR to FILE. */ |

2541 | |

2542 | void |

2543 | dump_value_range (FILE *file, const value_range *vr) |

2544 | { |

2545 | if (vr == NULL) |

2546 | fprintf (file, "[]"); |

2547 | else if (vr->type == VR_UNDEFINED) |

2548 | fprintf (file, "UNDEFINED"); |

2549 | else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) |

2550 | { |

2551 | tree type = TREE_TYPE (vr->min); |

2552 | |

2553 | fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~": ""); |

2554 | |

2555 | if (INTEGRAL_TYPE_P (type) |

2556 | && !TYPE_UNSIGNED (type) |

2557 | && vrp_val_is_min (vr->min)) |

2558 | fprintf (file, "-INF"); |

2559 | else |

2560 | print_generic_expr (file, vr->min); |

2561 | |

2562 | fprintf (file, ", "); |

2563 | |

2564 | if (INTEGRAL_TYPE_P (type) |

2565 | && vrp_val_is_max (vr->max)) |

2566 | fprintf (file, "+INF"); |

2567 | else |

2568 | print_generic_expr (file, vr->max); |

2569 | |

2570 | fprintf (file, "]"); |

2571 | |

2572 | if (vr->equiv) |

2573 | { |

2574 | bitmap_iterator bi; |

2575 | unsigned i, c = 0; |

2576 | |

2577 | fprintf (file, " EQUIVALENCES: { "); |

2578 | |

2579 | EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) |

2580 | { |

2581 | print_generic_expr (file, ssa_name (i)); |

2582 | fprintf (file, " "); |

2583 | c++; |

2584 | } |

2585 | |

2586 | fprintf (file, "} (%u elements)", c); |

2587 | } |

2588 | } |

2589 | else if (vr->type == VR_VARYING) |

2590 | fprintf (file, "VARYING"); |

2591 | else |

2592 | fprintf (file, "INVALID RANGE"); |

2593 | } |

2594 | |

2595 | |

2596 | /* Dump value range VR to stderr. */ |

2597 | |

2598 | DEBUG_FUNCTION void |

2599 | debug_value_range (value_range *vr) |

2600 | { |

2601 | dump_value_range (stderr, vr); |

2602 | fprintf (stderr, "\n"); |

2603 | } |

2604 | |

2605 | |

2606 | /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |

2607 | create a new SSA name N and return the assertion assignment |

2608 | 'N = ASSERT_EXPR <V, V OP W>'. */ |

2609 | |

2610 | static gimple * |

2611 | build_assert_expr_for (tree cond, tree v) |

2612 | { |

2613 | tree a; |

2614 | gassign *assertion; |

2615 | |

2616 | gcc_assert (TREE_CODE (v) == SSA_NAME |

2617 | && COMPARISON_CLASS_P (cond)); |

2618 | |

2619 | a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |

2620 | assertion = gimple_build_assign (NULL_TREE, a); |

2621 | |

2622 | /* The new ASSERT_EXPR creates a new SSA name that replaces the |

2623 | operand of the ASSERT_EXPR. Create it so the new name and the old one |

2624 | are registered in the replacement table so that we can fix the SSA web |

2625 | after adding all the ASSERT_EXPRs. */ |

2626 | tree new_def = create_new_def_for (v, assertion, NULL); |

2627 | /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain |

2628 | given we have to be able to fully propagate those out to re-create |

2629 | valid SSA when removing the asserts. */ |

2630 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v)) |

2631 | SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1; |

2632 | |

2633 | return assertion; |

2634 | } |

2635 | |

2636 | |

2637 | /* Return true if STMT is a GIMPLE_COND whose predicate involves |

2638 | floating point values. */ |

2639 | |

2640 | static inline bool |

2641 | fp_predicate (gimple *stmt) |

2642 | { |

2643 | GIMPLE_CHECK (stmt, GIMPLE_COND); |

2644 | |

2645 | return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); |

2646 | } |

2647 | |

2648 | /* If the range of values taken by OP can be inferred after STMT executes, |

2649 | return the comparison code (COMP_CODE_P) and value (VAL_P) that |

2650 | describe the inferred range. Return true if a range could be |

2651 | inferred. */ |

2652 | |

2653 | bool |

2654 | infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p) |

2655 | { |

2656 | *val_p = NULL_TREE; |

2657 | *comp_code_p = ERROR_MARK; |

2658 | |

2659 | /* Do not attempt to infer anything in names that flow through |

2660 | abnormal edges. */ |

2661 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) |

2662 | return false; |

2663 | |

2664 | /* If STMT is the last statement of a basic block with no normal |

2665 | successors, there is no point inferring anything about any of its |

2666 | operands. We would not be able to find a proper insertion point |

2667 | for the assertion, anyway. */ |

2668 | if (stmt_ends_bb_p (stmt)) |

2669 | { |

2670 | edge_iterator ei; |

2671 | edge e; |

2672 | |

2673 | FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) |

2674 | if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH))) |

2675 | break; |

2676 | if (e == NULL) |

2677 | return false; |

2678 | } |

2679 | |

2680 | if (infer_nonnull_range (stmt, op)) |

2681 | { |

2682 | *val_p = build_int_cst (TREE_TYPE (op), 0); |

2683 | *comp_code_p = NE_EXPR; |

2684 | return true; |

2685 | } |

2686 | |

2687 | return false; |

2688 | } |

2689 | |

2690 | |

2691 | void dump_asserts_for (FILE *, tree); |

2692 | void debug_asserts_for (tree); |

2693 | void dump_all_asserts (FILE *); |

2694 | void debug_all_asserts (void); |

2695 | |

2696 | /* Dump all the registered assertions for NAME to FILE. */ |

2697 | |

2698 | void |

2699 | dump_asserts_for (FILE *file, tree name) |

2700 | { |

2701 | assert_locus *loc; |

2702 | |

2703 | fprintf (file, "Assertions to be inserted for "); |

2704 | print_generic_expr (file, name); |

2705 | fprintf (file, "\n"); |

2706 | |

2707 | loc = asserts_for[SSA_NAME_VERSION (name)]; |

2708 | while (loc) |

2709 | { |

2710 | fprintf (file, "\t"); |

2711 | print_gimple_stmt (file, gsi_stmt (loc->si), 0); |

2712 | fprintf (file, "\n\tBB #%d", loc->bb->index); |

2713 | if (loc->e) |

2714 | { |

2715 | fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, |

2716 | loc->e->dest->index); |

2717 | dump_edge_info (file, loc->e, dump_flags, 0); |

2718 | } |

2719 | fprintf (file, "\n\tPREDICATE: "); |

2720 | print_generic_expr (file, loc->expr); |

2721 | fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); |

2722 | print_generic_expr (file, loc->val); |

2723 | fprintf (file, "\n\n"); |

2724 | loc = loc->next; |

2725 | } |

2726 | |

2727 | fprintf (file, "\n"); |

2728 | } |

2729 | |

2730 | |

2731 | /* Dump all the registered assertions for NAME to stderr. */ |

2732 | |

2733 | DEBUG_FUNCTION void |

2734 | debug_asserts_for (tree name) |

2735 | { |

2736 | dump_asserts_for (stderr, name); |

2737 | } |

2738 | |

2739 | |

2740 | /* Dump all the registered assertions for all the names to FILE. */ |

2741 | |

2742 | void |

2743 | dump_all_asserts (FILE *file) |

2744 | { |

2745 | unsigned i; |

2746 | bitmap_iterator bi; |

2747 | |

2748 | fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); |

2749 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |

2750 | dump_asserts_for (file, ssa_name (i)); |

2751 | fprintf (file, "\n"); |

2752 | } |

2753 | |

2754 | |

2755 | /* Dump all the registered assertions for all the names to stderr. */ |

2756 | |

2757 | DEBUG_FUNCTION void |

2758 | debug_all_asserts (void) |

2759 | { |

2760 | dump_all_asserts (stderr); |

2761 | } |

2762 | |

2763 | /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */ |

2764 | |

2765 | static void |

2766 | add_assert_info (vec<assert_info> &asserts, |

2767 | tree name, tree expr, enum tree_code comp_code, tree val) |

2768 | { |

2769 | assert_info info; |

2770 | info.comp_code = comp_code; |

2771 | info.name = name; |

2772 | info.val = val; |

2773 | info.expr = expr; |

2774 | asserts.safe_push (info); |

2775 | } |

2776 | |

2777 | /* If NAME doesn't have an ASSERT_EXPR registered for asserting |

2778 | 'EXPR COMP_CODE VAL' at a location that dominates block BB or |

2779 | E->DEST, then register this location as a possible insertion point |

2780 | for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. |

2781 | |

2782 | BB, E and SI provide the exact insertion point for the new |

2783 | ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted |

2784 | on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on |

2785 | BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E |

2786 | must not be NULL. */ |

2787 | |

2788 | static void |

2789 | register_new_assert_for (tree name, tree expr, |

2790 | enum tree_code comp_code, |

2791 | tree val, |

2792 | basic_block bb, |

2793 | edge e, |

2794 | gimple_stmt_iterator si) |

2795 | { |

2796 | assert_locus *n, *loc, *last_loc; |

2797 | basic_block dest_bb; |

2798 | |

2799 | gcc_checking_assert (bb == NULL || e == NULL); |

2800 | |

2801 | if (e == NULL) |

2802 | gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND |

2803 | && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); |

2804 | |

2805 | /* Never build an assert comparing against an integer constant with |

2806 | TREE_OVERFLOW set. This confuses our undefined overflow warning |

2807 | machinery. */ |

2808 | if (TREE_OVERFLOW_P (val)) |

2809 | val = drop_tree_overflow (val); |

2810 | |

2811 | /* The new assertion A will be inserted at BB or E. We need to |

2812 | determine if the new location is dominated by a previously |

2813 | registered location for A. If we are doing an edge insertion, |

2814 | assume that A will be inserted at E->DEST. Note that this is not |

2815 | necessarily true. |

2816 | |

2817 | If E is a critical edge, it will be split. But even if E is |

2818 | split, the new block will dominate the same set of blocks that |

2819 | E->DEST dominates. |

2820 | |

2821 | The reverse, however, is not true, blocks dominated by E->DEST |

2822 | will not be dominated by the new block created to split E. So, |

2823 | if the insertion location is on a critical edge, we will not use |

2824 | the new location to move another assertion previously registered |

2825 | at a block dominated by E->DEST. */ |

2826 | dest_bb = (bb) ? bb : e->dest; |

2827 | |

2828 | /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and |

2829 | VAL at a block dominating DEST_BB, then we don't need to insert a new |

2830 | one. Similarly, if the same assertion already exists at a block |

2831 | dominated by DEST_BB and the new location is not on a critical |

2832 | edge, then update the existing location for the assertion (i.e., |

2833 | move the assertion up in the dominance tree). |

2834 | |

2835 | Note, this is implemented as a simple linked list because there |

2836 | should not be more than a handful of assertions registered per |

2837 | name. If this becomes a performance problem, a table hashed by |

2838 | COMP_CODE and VAL could be implemented. */ |

2839 | loc = asserts_for[SSA_NAME_VERSION (name)]; |

2840 | last_loc = loc; |

2841 | while (loc) |

2842 | { |

2843 | if (loc->comp_code == comp_code |

2844 | && (loc->val == val |

2845 | || operand_equal_p (loc->val, val, 0)) |

2846 | && (loc->expr == expr |

2847 | || operand_equal_p (loc->expr, expr, 0))) |

2848 | { |

2849 | /* If E is not a critical edge and DEST_BB |

2850 | dominates the existing location for the assertion, move |

2851 | the assertion up in the dominance tree by updating its |

2852 | location information. */ |

2853 | if ((e == NULL || !EDGE_CRITICAL_P (e)) |

2854 | && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) |

2855 | { |

2856 | loc->bb = dest_bb; |

2857 | loc->e = e; |

2858 | loc->si = si; |

2859 | return; |

2860 | } |

2861 | } |

2862 | |

2863 | /* Update the last node of the list and move to the next one. */ |

2864 | last_loc = loc; |

2865 | loc = loc->next; |

2866 | } |

2867 | |

2868 | /* If we didn't find an assertion already registered for |

2869 | NAME COMP_CODE VAL, add a new one at the end of the list of |

2870 | assertions associated with NAME. */ |

2871 | n = XNEW (struct assert_locus); |

2872 | n->bb = dest_bb; |

2873 | n->e = e; |

2874 | n->si = si; |

2875 | n->comp_code = comp_code; |

2876 | n->val = val; |

2877 | n->expr = expr; |

2878 | n->next = NULL; |

2879 | |

2880 | if (last_loc) |

2881 | last_loc->next = n; |

2882 | else |

2883 | asserts_for[SSA_NAME_VERSION (name)] = n; |

2884 | |

2885 | bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); |

2886 | } |

2887 | |

2888 | /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |

2889 | Extract a suitable test code and value and store them into *CODE_P and |

2890 | *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. |

2891 | |

2892 | If no extraction was possible, return FALSE, otherwise return TRUE. |

2893 | |

2894 | If INVERT is true, then we invert the result stored into *CODE_P. */ |

2895 | |

2896 | static bool |

2897 | extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, |

2898 | tree cond_op0, tree cond_op1, |

2899 | bool invert, enum tree_code *code_p, |

2900 | tree *val_p) |

2901 | { |

2902 | enum tree_code comp_code; |

2903 | tree val; |

2904 | |

2905 | /* We have a comparison of the form NAME COMP VAL |

2906 | or VAL COMP NAME. */ |

2907 | if (name == cond_op1) |

2908 | { |

2909 | /* If the predicate is of the form VAL COMP NAME, flip |

2910 | COMP around because we need to register NAME as the |

2911 | first operand in the predicate. */ |

2912 | comp_code = swap_tree_comparison (cond_code); |

2913 | val = cond_op0; |

2914 | } |

2915 | else if (name == cond_op0) |

2916 | { |

2917 | /* The comparison is of the form NAME COMP VAL, so the |

2918 | comparison code remains unchanged. */ |

2919 | comp_code = cond_code; |

2920 | val = cond_op1; |

2921 | } |

2922 | else |

2923 | gcc_unreachable (); |

2924 | |

2925 | /* Invert the comparison code as necessary. */ |

2926 | if (invert) |

2927 | comp_code = invert_tree_comparison (comp_code, 0); |

2928 | |

2929 | /* VRP only handles integral and pointer types. */ |

2930 | if (! INTEGRAL_TYPE_P (TREE_TYPE (val)) |

2931 | && ! POINTER_TYPE_P (TREE_TYPE (val))) |

2932 | return false; |

2933 | |

2934 | /* Do not register always-false predicates. |

2935 | FIXME: this works around a limitation in fold() when dealing with |

2936 | enumerations. Given 'enum { N1, N2 } x;', fold will not |

2937 | fold 'if (x > N2)' to 'if (0)'. */ |

2938 | if ((comp_code == GT_EXPR || comp_code == LT_EXPR) |

2939 | && INTEGRAL_TYPE_P (TREE_TYPE (val))) |

2940 | { |

2941 | tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); |

2942 | tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); |

2943 | |

2944 | if (comp_code == GT_EXPR |

2945 | && (!max |

2946 | || compare_values (val, max) == 0)) |

2947 | return false; |

2948 | |

2949 | if (comp_code == LT_EXPR |

2950 | && (!min |

2951 | || compare_values (val, min) == 0)) |

2952 | return false; |

2953 | } |

2954 | *code_p = comp_code; |

2955 | *val_p = val; |

2956 | return true; |

2957 | } |

2958 | |

2959 | /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any |

2960 | (otherwise return VAL). VAL and MASK must be zero-extended for |

2961 | precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT |

2962 | (to transform signed values into unsigned) and at the end xor |

2963 | SGNBIT back. */ |
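/* For example, with PREC = 8, VAL = 4, MASK = 0b00010100 and
   SGNBIT = 0, the values representable under MASK are 0, 4, 16 and
   20; the smallest one greater than VAL is 16, which is what the
   loop below finds.  */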

2964 | |

2965 | static wide_int |

2966 | masked_increment (const wide_int &val_in, const wide_int &mask, |

2967 | const wide_int &sgnbit, unsigned int prec) |

2968 | { |

2969 | wide_int bit = wi::one (prec), res; |

2970 | unsigned int i; |

2971 | |

2972 | wide_int val = val_in ^ sgnbit; |

2973 | for (i = 0; i < prec; i++, bit += bit) |

2974 | { |

2975 | res = mask; |

2976 | if ((res & bit) == 0) |

2977 | continue; |

2978 | res = bit - 1; |

2979 | res = wi::bit_and_not (val + bit, res); |

2980 | res &= mask; |

2981 | if (wi::gtu_p (res, val)) |

2982 | return res ^ sgnbit; |

2983 | } |

2984 | return val ^ sgnbit; |

2985 | } |

2986 | |

2987 | /* Helper for overflow_comparison_p |

2988 | |

2989 | OP0 CODE OP1 is a comparison. Examine the comparison and potentially |

2990 | OP1's defining statement to see if it ultimately has the form |

2991 | OP0 CODE (OP0 PLUS INTEGER_CST) |

2992 | |

2993 | If so, return TRUE indicating this is an overflow test and store into |

2994 | *NEW_CST an updated constant that can be used in a narrowed range test. |

2995 | |

2996 | REVERSED indicates if the comparison was originally: |

2997 | |

2998 | OP1 CODE' OP0. |

2999 | |

3000 | This affects how we build the updated constant. */ |
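/* For example, with unsigned char x, the comparison x > x + 10 is
   an overflow check: it is true exactly when x + 10 wraps, i.e.
   when x > 255 - 10, so *NEW_CST is set to 245 and the test can be
   narrowed to x > 245.  */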

3001 | |

3002 | static bool |

3003 | overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1, |

3004 | bool follow_assert_exprs, bool reversed, tree *new_cst) |

3005 | { |

3006 | /* See if this is a relational operation between two SSA_NAMES with |

3007 | unsigned, overflow wrapping values. If so, check it more deeply. */ |

3008 | if ((code == LT_EXPR || code == LE_EXPR |

3009 | || code == GE_EXPR || code == GT_EXPR) |

3010 | && TREE_CODE (op0) == SSA_NAME |

3011 | && TREE_CODE (op1) == SSA_NAME |

3012 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) |

3013 | && TYPE_UNSIGNED (TREE_TYPE (op0)) |

3014 | && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0))) |

3015 | { |

3016 | gimple *op1_def = SSA_NAME_DEF_STMT (op1); |

3017 | |

3018 | /* If requested, follow any ASSERT_EXPRs backwards for OP1. */ |

3019 | if (follow_assert_exprs) |

3020 | { |

3021 | while (gimple_assign_single_p (op1_def) |

3022 | && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR) |

3023 | { |

3024 | op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0); |

3025 | if (TREE_CODE (op1) != SSA_NAME) |

3026 | break; |

3027 | op1_def = SSA_NAME_DEF_STMT (op1); |

3028 | } |

3029 | } |

3030 | |

3031 | /* Now look at the defining statement of OP1 to see if it adds |

3032 | or subtracts a nonzero constant from another operand. */ |

3033 | if (op1_def |

3034 | && is_gimple_assign (op1_def) |

3035 | && gimple_assign_rhs_code (op1_def) == PLUS_EXPR |

3036 | && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST |

3037 | && !integer_zerop (gimple_assign_rhs2 (op1_def))) |

3038 | { |

3039 | tree target = gimple_assign_rhs1 (op1_def); |

3040 | |

3041 | /* If requested, follow ASSERT_EXPRs backwards for op0 looking |

3042 | for one where TARGET appears on the RHS. */ |

3043 | if (follow_assert_exprs) |

3044 | { |

3045 | /* Now see if that "other operand" is op0, following the chain |

3046 | of ASSERT_EXPRs if necessary. */ |

3047 | gimple *op0_def = SSA_NAME_DEF_STMT (op0); |

3048 | while (op0 != target |

3049 | && gimple_assign_single_p (op0_def) |

3050 | && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR) |

3051 | { |

3052 | op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0); |

3053 | if (TREE_CODE (op0) != SSA_NAME) |

3054 | break; |

3055 | op0_def = SSA_NAME_DEF_STMT (op0); |

3056 | } |

3057 | } |

3058 | |

3059 | /* If we did not find our target SSA_NAME, then this is not |

3060 | an overflow test. */ |

3061 | if (op0 != target) |

3062 | return false; |

3063 | |

3064 | tree type = TREE_TYPE (op0); |

3065 | wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED); |

3066 | tree inc = gimple_assign_rhs2 (op1_def); |

3067 | if (reversed) |

3068 | *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc)); |

3069 | else |

3070 | *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc)); |

3071 | return true; |

3072 | } |

3073 | } |

3074 | return false; |

3075 | } |

3076 | |

3077 | /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially |

3078 | OP1's defining statement to see if it ultimately has the form |

3079 | OP0 CODE (OP0 PLUS INTEGER_CST) |

3080 | |

3081 | If so, return TRUE indicating this is an overflow test and store into |

3082 | *NEW_CST an updated constant that can be used in a narrowed range test. |

3083 | |

3084 | These statements are left as-is in the IL to facilitate discovery of |

3085 | {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But |

3086 | the alternate range representation is often useful within VRP. */ |

3087 | |

3088 | bool |

3089 | overflow_comparison_p (tree_code code, tree name, tree val, |

3090 | bool use_equiv_p, tree *new_cst) |

3091 | { |

3092 | if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst)) |

3093 | return true; |

3094 | return overflow_comparison_p_1 (swap_tree_comparison (code), val, name, |

3095 | use_equiv_p, true, new_cst); |

3096 | } |

3097 | |

3098 | |

3099 | /* Try to register an edge assertion for SSA name NAME on edge E for |

3100 | the condition COND_OP0 COND_CODE COND_OP1 contributing to the |

3101 | conditional jump at the end of E->src.  Invert the condition if INVERT is true. */ |

3102 | |

3103 | static void |

3104 | register_edge_assert_for_2 (tree name, edge e, |

3105 | enum tree_code cond_code, |

3106 | tree cond_op0, tree cond_op1, bool invert, |

3107 | vec<assert_info> &asserts) |

3108 | { |

3109 | tree val; |

3110 | enum tree_code comp_code; |

3111 | |

3112 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |

3113 | cond_op0, |

3114 | cond_op1, |

3115 | invert, &comp_code, &val)) |

3116 | return; |

3117 | |

3118 | /* Queue the assert. */ |

3119 | tree x; |

3120 | if (overflow_comparison_p (comp_code, name, val, false, &x)) |

3121 | { |

3122 | enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR) |

3123 | ? GT_EXPR : LE_EXPR); |

3124 | add_assert_info (asserts, name, name, new_code, x); |

3125 | } |

3126 | add_assert_info (asserts, name, name, comp_code, val); |

3127 | |

3128 | /* In the case of NAME <= CST and NAME being defined as |

3129 | NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 |

3130 | and NAME2 <= CST - CST2. We can do the same for NAME > CST. |

3131 | This catches range and anti-range tests. */ |
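/* For instance, with 32-bit unsigned NAME = NAME2 + 0xfffffff6
   (i.e. NAME2 - 10), NAME <= 5 implies 10 <= NAME2 <= 15.  */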

3132 | if ((comp_code == LE_EXPR |

3133 | || comp_code == GT_EXPR) |

3134 | && TREE_CODE (val) == INTEGER_CST |

3135 | && TYPE_UNSIGNED (TREE_TYPE (val))) |

3136 | { |

3137 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |

3138 | tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |

3139 | |

3140 | /* Extract CST2 from the (optional) addition. */ |

3141 | if (is_gimple_assign (def_stmt) |

3142 | && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) |

3143 | { |

3144 | name2 = gimple_assign_rhs1 (def_stmt); |

3145 | cst2 = gimple_assign_rhs2 (def_stmt); |

3146 | if (TREE_CODE (name2) == SSA_NAME |

3147 | && TREE_CODE (cst2) == INTEGER_CST) |

3148 | def_stmt = SSA_NAME_DEF_STMT (name2); |

3149 | } |

3150 | |

3151 | /* Extract NAME3 from the (optional) sign-changing cast. */ |

3152 | if (gimple_assign_cast_p (def_stmt)) |

3153 | { |

3154 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) |

3155 | && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |

3156 | && (TYPE_PRECISION (gimple_expr_type (def_stmt)) |

3157 | == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) |

3158 | name3 = gimple_assign_rhs1 (def_stmt); |

3159 | } |

3160 | |

3161 | /* If name3 is used later, create an ASSERT_EXPR for it. */ |

3162 | if (name3 != NULL_TREE |

3163 | && TREE_CODE (name3) == SSA_NAME |

3164 | && (cst2 == NULL_TREE |

3165 | || TREE_CODE (cst2) == INTEGER_CST) |

3166 | && INTEGRAL_TYPE_P (TREE_TYPE (name3))) |

3167 | { |

3168 | tree tmp; |

3169 | |

3170 | /* Build an expression for the range test. */ |

3171 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); |

3172 | if (cst2 != NULL_TREE) |

3173 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |

3174 | |

3175 | if (dump_file) |

3176 | { |

3177 | fprintf (dump_file, "Adding assert for "); |

3178 | print_generic_expr (dump_file, name3); |

3179 | fprintf (dump_file, " from "); |

3180 | print_generic_expr (dump_file, tmp); |

3181 | fprintf (dump_file, "\n"); |

3182 | } |

3183 | |

3184 | add_assert_info (asserts, name3, tmp, comp_code, val); |

3185 | } |

3186 | |

3187 | /* If name2 is used later, create an ASSERT_EXPR for it. */ |

3188 | if (name2 != NULL_TREE |

3189 | && TREE_CODE (name2) == SSA_NAME |

3190 | && TREE_CODE (cst2) == INTEGER_CST |

3191 | && INTEGRAL_TYPE_P (TREE_TYPE (name2))) |

3192 | { |

3193 | tree tmp; |

3194 | |

3195 | /* Build an expression for the range test. */ |

3196 | tmp = name2; |

3197 | if (TREE_TYPE (name) != TREE_TYPE (name2)) |

3198 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); |

3199 | if (cst2 != NULL_TREE) |

3200 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); |

3201 | |

3202 | if (dump_file) |

3203 | { |

3204 | fprintf (dump_file, "Adding assert for "); |

3205 | print_generic_expr (dump_file, name2); |

3206 | fprintf (dump_file, " from "); |

3207 | print_generic_expr (dump_file, tmp); |

3208 | fprintf (dump_file, "\n"); |

3209 | } |

3210 | |

3211 | add_assert_info (asserts, name2, tmp, comp_code, val); |

3212 | } |

3213 | } |

3214 | |

3215 | /* In the case of post-increment/decrement tests like if (i++) ... and uses |

3216 | of the incremented/decremented value on the edge, the extra name we want to |

3217 | assert for is not on the def chain of the name compared. Instead |

3218 | it is in the set of use stmts. |

3219 | Similar cases happen for conversions that were simplified through |

3220 | fold_{sign_changed,widened}_comparison. */ |
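
| /* For instance, a source test like "if (i++)" becomes roughly |

| i_2 = i_1 + 1; if (i_1 != 0) in GIMPLE; on the true edge the loop below |

| finds the use statement i_2 = i_1 + 1 and also asserts i_2 != 1 |

| (i_1 and i_2 are illustrative SSA names). */ |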

3221 | if ((comp_code == NE_EXPR |

3222 | || comp_code == EQ_EXPR) |

3223 | && TREE_CODE (val) == INTEGER_CST) |

3224 | { |

3225 | imm_use_iterator ui; |

3226 | gimple *use_stmt; |

3227 | FOR_EACH_IMM_USE_STMT (use_stmt, ui, name) |

3228 | { |

3229 | if (!is_gimple_assign (use_stmt)) |

3230 | continue; |

3231 | |

3232 | /* Only consider use statements whose block dominates the predecessor E->src. */ |

3233 | if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt))) |

3234 | continue; |

3235 | |

3236 | tree name2 = gimple_assign_lhs (use_stmt); |

3237 | if (TREE_CODE (name2) != SSA_NAME) |

3238 | continue; |

3239 | |

3240 | enum tree_code code = gimple_assign_rhs_code (use_stmt); |

3241 | tree cst; |

3242 | if (code == PLUS_EXPR |

3243 | || code == MINUS_EXPR) |

3244 | { |

3245 | cst = gimple_assign_rhs2 (use_stmt); |

3246 | if (TREE_CODE (cst) != INTEGER_CST) |

3247 | continue; |

3248 | cst = int_const_binop (code, val, cst); |

3249 | } |

3250 | else if (CONVERT_EXPR_CODE_P (code)) |

3251 | { |

3252 | /* For truncating conversions we cannot record |

3253 | an inequality. */ |

3254 | if (comp_code == NE_EXPR |

3255 | && (TYPE_PRECISION (TREE_TYPE (name2)) |

3256 | < TYPE_PRECISION (TREE_TYPE (name)))) |

3257 | continue; |

3258 | cst = fold_convert (TREE_TYPE (name2), val); |

3259 | } |

3260 | else |

3261 | continue; |

3262 | |

3263 | if (TREE_OVERFLOW_P (cst)) |

3264 | cst = drop_tree_overflow (cst); |

3265 | add_assert_info (asserts, name2, name2, comp_code, cst); |

3266 | } |

3267 | } |

3268 | |

3269 | if (TREE_CODE_CLASS (comp_code) == tcc_comparison |

3270 | && TREE_CODE (val) == INTEGER_CST) |

3271 | { |

3272 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |

3273 | tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; |

3274 | tree val2 = NULL_TREE; |

3275 | unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); |

3276 | wide_int mask = wi::zero (prec); |

3277 | unsigned int nprec = prec; |

3278 | enum tree_code rhs_code = ERROR_MARK; |

3279 | |

3280 | if (is_gimple_assign (def_stmt)) |

3281 | rhs_code = gimple_assign_rhs_code (def_stmt); |

3282 | |

3283 | /* In the case of NAME != CST1 where NAME = A +- CST2 we can |

3284 | assert that A != CST1 -+ CST2. */ |
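
| /* For example, if NAME = A + 5 and the test is NAME == 20, we assert |

| A == 15; if NAME = A - 3 and the test is NAME != 7, we assert A != 10. */ |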

3285 | if ((comp_code == EQ_EXPR || comp_code == NE_EXPR) |

3286 | && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR)) |

3287 | { |

3288 | tree op0 = gimple_assign_rhs1 (def_stmt); |

3289 | tree op1 = gimple_assign_rhs2 (def_stmt); |

3290 | if (TREE_CODE (op0) == SSA_NAME |

3291 | && TREE_CODE (op1) == INTEGER_CST) |

3292 | { |

3293 | enum tree_code reverse_op = (rhs_code == PLUS_EXPR |

3294 | ? MINUS_EXPR : PLUS_EXPR); |

3295 | op1 = int_const_binop (reverse_op, val, op1); |

3296 | if (TREE_OVERFLOW (op1)) |

3297 | op1 = drop_tree_overflow (op1); |

3298 | add_assert_info (asserts, op0, op0, comp_code, op1); |

3299 | } |

3300 | } |

3301 | |

3302 | /* Add asserts for NAME cmp CST and NAME being defined |

3303 | as NAME = (int) NAME2. */ |
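
| /* An illustrative example, assuming 32-bit int: for NAME = (int) NAME2 |

| with NAME2 unsigned and a test NAME < 0, the code below asserts |

| NAME2 + 0x80000000 <= 0x7fffffff, i.e. NAME2 >= 0x80000000. */ |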

3304 | if (!TYPE_UNSIGNED (TREE_TYPE (val)) |

3305 | && (comp_code == LE_EXPR || comp_code == LT_EXPR |

3306 | || comp_code == GT_EXPR || comp_code == GE_EXPR) |

3307 | && gimple_assign_cast_p (def_stmt)) |

3308 | { |

3309 | name2 = gimple_assign_rhs1 (def_stmt); |

3310 | if (CONVERT_EXPR_CODE_P (rhs_code) |

3311 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |

3312 | && TYPE_UNSIGNED (TREE_TYPE (name2)) |

3313 | && prec == TYPE_PRECISION (TREE_TYPE (name2)) |

3314 | && (comp_code == LE_EXPR || comp_code == GT_EXPR |

3315 | || !tree_int_cst_equal (val, |

3316 | TYPE_MIN_VALUE (TREE_TYPE (val))))) |

3317 | { |

3318 | tree tmp, cst; |

3319 | enum tree_code new_comp_code = comp_code; |

3320 | |

3321 | cst = fold_convert (TREE_TYPE (name2), |

3322 | TYPE_MIN_VALUE (TREE_TYPE (val))); |

3323 | /* Build an expression for the range test. */ |

3324 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst); |

3325 | cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst, |

3326 | fold_convert (TREE_TYPE (name2), val)); |

3327 | if (comp_code == LT_EXPR || comp_code == GE_EXPR) |

3328 | { |

3329 | new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; |

3330 | cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, |

3331 | build_int_cst (TREE_TYPE (name2), 1)); |

3332 | } |

3333 | |

3334 | if (dump_file) |

3335 | { |

3336 | fprintf (dump_file, "Adding assert for "); |

3337 | print_generic_expr (dump_file, name2); |

3338 | fprintf (dump_file, " from "); |

3339 | print_generic_expr (dump_file, tmp); |

3340 | fprintf (dump_file, "\n"); |

3341 | } |

3342 | |

3343 | add_assert_info (asserts, name2, tmp, new_comp_code, cst); |

3344 | } |

3345 | } |

3346 | |

3347 | /* Add asserts for NAME cmp CST and NAME being defined as |

3348 | NAME = NAME2 >> CST2. |

3349 | |

3350 | Extract CST2 from the right shift. */ |
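
| /* For example, if NAME = NAME2 >> 4 and the test is NAME == 3, the code |

| below asserts (NAME2 - 0x30) <= 0xf in the unsigned type, i.e. NAME2 is |

| in [0x30, 0x3f]; for NAME != 3 it asserts the value lies outside that range. */ |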

3351 | if (rhs_code == RSHIFT_EXPR) |

3352 | { |

3353 | name2 = gimple_assign_rhs1 (def_stmt); |

3354 | cst2 = gimple_assign_rhs2 (def_stmt); |

3355 | if (TREE_CODE (name2) == SSA_NAME |

3356 | && tree_fits_uhwi_p (cst2) |

3357 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |

3358 | && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) |

3359 | && type_has_mode_precision_p (TREE_TYPE (val))) |

3360 | { |

3361 | mask = wi::mask (tree_to_uhwi (cst2), false, prec); |

3362 | val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); |

3363 | } |

3364 | } |

3365 | if (val2 != NULL_TREE |

3366 | && TREE_CODE (val2) == INTEGER_CST |

3367 | && simple_cst_equal (fold_build2 (RSHIFT_EXPR, |

3368 | TREE_TYPE (val), |

3369 | val2, cst2), val)) |

3370 | { |

3371 | enum tree_code new_comp_code = comp_code; |

3372 | tree tmp, new_val; |

3373 | |

3374 | tmp = name2; |

3375 | if (comp_code == EQ_EXPR || comp_code == NE_EXPR) |

3376 | { |

3377 | if (!TYPE_UNSIGNED (TREE_TYPE (val))) |

3378 | { |

3379 | tree type = build_nonstandard_integer_type (prec, 1); |

3380 | tmp = build1 (NOP_EXPR, type, name2); |

3381 | val2 = fold_convert (type, val2); |

3382 | } |

3383 | tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); |

3384 | new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); |

3385 | new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; |

3386 | } |

3387 | else if (comp_code == LT_EXPR || comp_code == GE_EXPR) |

3388 | { |

3389 | wide_int minval |

3390 | = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); |

3391 | new_val = val2; |

3392 | if (minval == wi::to_wide (new_val)) |

3393 | new_val = NULL_TREE; |

3394 | } |

3395 | else |

3396 | { |

3397 | wide_int maxval |

3398 | = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); |

3399 | mask |= wi::to_wide (val2); |

3400 | if (wi::eq_p (mask, maxval)) |

3401 | new_val = NULL_TREE; |

3402 | else |

3403 | new_val = wide_int_to_tree (TREE_TYPE (val2), mask); |

3404 | } |

3405 | |

3406 | if (new_val) |

3407 | { |

3408 | if (dump_file) |

3409 | { |

3410 | fprintf (dump_file, "Adding assert for "); |

3411 | print_generic_expr (dump_file, name2); |

3412 | fprintf (dump_file, " from "); |

3413 | print_generic_expr (dump_file, tmp); |

3414 | fprintf (dump_file, "\n"); |

3415 | } |

3416 | |

3417 | add_assert_info (asserts, name2, tmp, new_comp_code, new_val); |

3418 | } |

3419 | } |

3420 | |

3421 | /* Add asserts for NAME cmp CST and NAME being defined as |

3422 | NAME = NAME2 & CST2. |

3423 | |

3424 | Extract CST2 from the and. |

3425 | |

3426 | Also handle |

3427 | NAME = (unsigned) NAME2; |

3428 | casts where NAME's type is unsigned and has smaller precision |

3429 | than NAME2's type as if it was NAME = NAME2 & MASK. */ |
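
| /* For example, from (X & 0xf0) == 0x20 the code below derives |

| 0x20 <= X <= 0x2f, emitted as the single assertion (X - 0x20) <= 0xf |

| computed in the corresponding unsigned type. */ |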

3430 | names[0] = NULL_TREE; |

3431 | names[1] = NULL_TREE; |

3432 | cst2 = NULL_TREE; |

3433 | if (rhs_code == BIT_AND_EXPR |

3434 | || (CONVERT_EXPR_CODE_P (rhs_code) |

3435 | && INTEGRAL_TYPE_P (TREE_TYPE (val)) |

3436 | && TYPE_UNSIGNED (TREE_TYPE (val)) |

3437 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |

3438 | > prec)) |

3439 | { |

3440 | name2 = gimple_assign_rhs1 (def_stmt); |

3441 | if (rhs_code == BIT_AND_EXPR) |

3442 | cst2 = gimple_assign_rhs2 (def_stmt); |

3443 | else |

3444 | { |

3445 | cst2 = TYPE_MAX_VALUE (TREE_TYPE (val)); |

3446 | nprec = TYPE_PRECISION (TREE_TYPE (name2)); |

3447 | } |

3448 | if (TREE_CODE (name2) == SSA_NAME |

3449 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |

3450 | && TREE_CODE (cst2) == INTEGER_CST |

3451 | && !integer_zerop (cst2) |

3452 | && (nprec > 1 |

3453 | || TYPE_UNSIGNED (TREE_TYPE (val)))) |

3454 | { |

3455 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2); |

3456 | if (gimple_assign_cast_p (def_stmt2)) |

3457 | { |

3458 | names[1] = gimple_assign_rhs1 (def_stmt2); |

3459 | if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2)) |

3460 | || !INTEGRAL_TYPE_P (TREE_TYPE (names[1])) |

3461 | || (TYPE_PRECISION (TREE_TYPE (name2)) |

3462 | != TYPE_PRECISION (TREE_TYPE (names[1])))) |

3463 | names[1] = NULL_TREE; |

3464 | } |

3465 | names[0] = name2; |

3466 | } |

3467 | } |

3468 | if (names[0] || names[1]) |

3469 | { |

3470 | wide_int minv, maxv, valv, cst2v; |

3471 | wide_int tem, sgnbit; |

3472 | bool valid_p = false, valn, cst2n; |

3473 | enum tree_code ccode = comp_code; |

3474 | |

3475 | valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED); |

3476 | cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED); |

3477 | valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val))); |

3478 | cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val))); |

3479 | /* If CST2 doesn't have the most significant bit set, |

3480 | but VAL is negative, we have a comparison like |

3481 | if ((x & 0x123) > -4) (always true). Just give up. */ |

3482 | if (!cst2n && valn) |

3483 | ccode = ERROR_MARK; |

3484 | if (cst2n) |

3485 | sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); |

3486 | else |

3487 | sgnbit = wi::zero (nprec); |

3488 | minv = valv & cst2v; |

3489 | switch (ccode) |

3490 | { |

3491 | case EQ_EXPR: |

3492 | /* Minimum unsigned value for equality is VAL & CST2 |

3493 | (should be equal to VAL, otherwise we probably should |

3494 | have folded the comparison into false) and |

3495 | maximum unsigned value is VAL | ~CST2. */ |

3496 | maxv = valv | ~cst2v; |

3497 | valid_p = true; |

3498 | break; |

3499 | |

3500 | case NE_EXPR: |

3501 | tem = valv | ~cst2v; |

3502 | /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ |

3503 | if (valv == 0) |

3504 | { |

3505 | cst2n = false; |

3506 | sgnbit = wi::zero (nprec); |

3507 | goto gt_expr; |

3508 | } |

3509 | /* If (VAL | ~CST2) is all ones, handle it as |

3510 | (X & CST2) < VAL. */ |

3511 | if (tem == -1) |

3512 | { |

3513 | cst2n = false; |

3514 | valn = false; |

3515 | sgnbit = wi::zero (nprec); |

3516 | goto lt_expr; |

3517 | } |

3518 | if (!cst2n && wi::neg_p (cst2v)) |

3519 | sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); |

3520 | if (sgnbit != 0) |

3521 | { |

3522 | if (valv == sgnbit) |

3523 | { |

3524 | cst2n = true; |

3525 | valn = true; |

3526 | goto gt_expr; |

3527 | } |

3528 | if (tem == wi::mask (nprec - 1, false, nprec)) |

3529 | { |

3530 | cst2n = true; |

3531 | goto lt_expr; |

3532 | } |

3533 | if (!cst2n) |

3534 | sgnbit = wi::zero (nprec); |

3535 | } |

3536 | break; |

3537 | |

3538 | case GE_EXPR: |

3539 | /* Minimum unsigned value for >= if (VAL & CST2) == VAL |

3540 | is VAL and maximum unsigned value is ~0. For signed |

3541 | comparison, if CST2 doesn't have most significant bit |

3542 | set, handle it similarly. If CST2 has MSB set, |

3543 | the minimum is the same, and maximum is ~0U/2. */ |

3544 | if (minv != valv) |

3545 | { |

3546 | /* If (VAL & CST2) != VAL, X & CST2 can't be equal to |

3547 | VAL. */ |

3548 | minv = masked_increment (valv, cst2v, sgnbit, nprec); |

3549 | if (minv == valv) |

3550 | break; |

3551 | } |

3552 | maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |

3553 | valid_p = true; |

3554 | break; |

3555 | |

3556 | case GT_EXPR: |

3557 | gt_expr: |

3558 | /* Find out smallest MINV where MINV > VAL |

3559 | && (MINV & CST2) == MINV, if any. If VAL is signed and |

3560 | CST2 has MSB set, compute it biased by 1 << (nprec - 1). */ |
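
| /* For example, with VAL = 0x21 and CST2 = 0xf0 the smallest such MINV is |

| 0x30, so a test (X & 0xf0) > 0x21 lets us assert X >= 0x30 for unsigned X. */ |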

3561 | minv = masked_increment (valv, cst2v, sgnbit, nprec); |

3562 | if (minv == valv) |

3563 | break; |

3564 | maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |

3565 | valid_p = true; |

3566 | break; |

3567 | |

3568 | case LE_EXPR: |

3569 | /* Minimum unsigned value for <= is 0 and maximum |

3570 | unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. |

3571 | Otherwise, find smallest VAL2 where VAL2 > VAL |

3572 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 |

3573 | as maximum. |

3574 | For signed comparison, if CST2 doesn't have most |

3575 | significant bit set, handle it similarly. If CST2 has |

3576 | MSB set, the maximum is the same and minimum is INT_MIN. */ |

3577 | if (minv == valv) |

3578 | maxv = valv; |

3579 | else |

3580 | { |

3581 | maxv = masked_increment (valv, cst2v, sgnbit, nprec); |

3582 | if (maxv == valv) |

3583 | break; |

3584 | maxv -= 1; |

3585 | } |

3586 | maxv |= ~cst2v; |

3587 | minv = sgnbit; |

3588 | valid_p = true; |

3589 | break; |

3590 | |

3591 | case LT_EXPR: |

3592 | lt_expr: |

3593 | /* Minimum unsigned value for < is 0 and maximum |

3594 | unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL. |

3595 | Otherwise, find smallest VAL2 where VAL2 > VAL |

3596 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 |

3597 | as maximum. |

3598 | For signed comparison, if CST2 doesn't have most |

3599 | significant bit set, handle it similarly. If CST2 has |

3600 | MSB set, the maximum is the same and minimum is INT_MIN. */ |

3601 | if (minv == valv) |

3602 | { |

3603 | if (valv == sgnbit) |

3604 | break; |

3605 | maxv = valv; |

3606 | } |

3607 | else |

3608 | { |

3609 | maxv = masked_increment (valv, cst2v, sgnbit, nprec); |

3610 | if (maxv == valv) |

3611 | break; |

3612 | } |

3613 | maxv -= 1; |

3614 | maxv |= ~cst2v; |

3615 | minv = sgnbit; |

3616 | valid_p = true; |

3617 | break; |

3618 | |

3619 | default: |

3620 | break; |

3621 | } |

3622 | if (valid_p |

3623 | && (maxv - minv) != -1) |

3624 | { |

3625 | tree tmp, new_val, type; |

3626 | int i; |

3627 | |

3628 | for (i = 0; i < 2; i++) |

3629 | if (names[i]) |

3630 | { |

3631 | wide_int maxv2 = maxv; |

3632 | tmp = names[i]; |

3633 | type = TREE_TYPE (names[i]); |

3634 | if (!TYPE_UNSIGNED (type)) |

3635 | { |

3636 | type = build_nonstandard_integer_type (nprec, 1); |

3637 | tmp = build1 (NOP_EXPR, type, names[i]); |

3638 | } |

3639 | if (minv != 0) |

3640 | { |

3641 | tmp = build2 (PLUS_EXPR, type, tmp, |

3642 | wide_int_to_tree (type, -minv)); |

3643 | maxv2 = maxv - minv; |

3644 | } |

3645 | new_val = wide_int_to_tree (type, maxv2); |

3646 | |

3647 | if (dump_file) |

3648 | { |

3649 | fprintf (dump_file, "Adding assert for "); |

3650 | print_generic_expr (dump_file, names[i]); |

3651 | fprintf (dump_file, " from "); |

3652 | print_generic_expr (dump_file, tmp); |

3653 | fprintf (dump_file, "\n"); |

3654 | } |

3655 | |

3656 | add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val); |

3657 | } |

3658 | } |

3659 | } |

3660 | } |

3661 | } |

3662 | |

3663 | /* OP is an operand of a truth value expression which is known to have |

3664 | a particular value. Register any asserts for OP and for any |

3665 | operands in OP's defining statement. |

3666 | |

3667 | If CODE is EQ_EXPR, then we want to register that OP is zero (false); |

3668 | if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */ |

3669 | |

3670 | static void |

3671 | register_edge_assert_for_1 (tree op, enum tree_code code, |

3672 | edge e, vec<assert_info> &asserts) |

3673 | { |

3674 | gimple *op_def; |

3675 | tree val; |

3676 | enum tree_code rhs_code; |

3677 | |

3678 | /* We only care about SSA_NAMEs. */ |

3679 | if (TREE_CODE (op) != SSA_NAME) |

3680 | return; |

3681 | |

3682 | /* We know that OP will have a zero or nonzero value. */ |

3683 | val = build_int_cst (TREE_TYPE (op), 0); |

3684 | add_assert_info (asserts, op, op, code, val); |

3685 | |

3686 | /* Now look at how OP is set. If it's set from a comparison, |

3687 | a truth operation or some bit operations, then we may be able |

3688 | to register information about the operands of that assignment. */ |

3689 | op_def = SSA_NAME_DEF_STMT (op); |

3690 | if (gimple_code (op_def) != GIMPLE_ASSIGN) |

3691 | return; |

3692 | |

3693 | rhs_code = gimple_assign_rhs_code (op_def); |

3694 | |

3695 | if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) |

3696 | { |

3697 | bool invert = (code == EQ_EXPR); |

3698 | tree op0 = gimple_assign_rhs1 (op_def); |

3699 | tree op1 = gimple_assign_rhs2 (op_def); |

3700 | |

3701 | if (TREE_CODE (op0) == SSA_NAME) |

3702 | register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts); |

3703 | if (TREE_CODE (op1) == SSA_NAME) |

3704 | register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts); |

3705 | } |

3706 | else if ((code == NE_EXPR |

3707 | && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) |

3708 | || (code == EQ_EXPR |

3709 | && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) |

3710 | { |

3711 | /* Recurse on each operand. */ |

3712 | tree op0 = gimple_assign_rhs1 (op_def); |

3713 | tree op1 = gimple_assign_rhs2 (op_def); |

3714 | if (TREE_CODE (op0) == SSA_NAME |

3715 | && has_single_use (op0)) |

3716 | register_edge_assert_for_1 (op0, code, e, asserts); |

3717 | if (TREE_CODE (op1) == SSA_NAME |

3718 | && has_single_use (op1)) |

3719 | register_edge_assert_for_1 (op1, code, e, asserts); |

3720 | } |

3721 | else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR |

3722 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) |

3723 | { |

3724 | /* Recurse, flipping CODE. */ |

3725 | code = invert_tree_comparison (code, false); |

3726 | register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts); |

3727 | } |

3728 | else if (gimple_assign_rhs_code (op_def) == SSA_NAME) |

3729 | { |

3730 | /* Recurse through the copy. */ |

3731 | register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts); |

3732 | } |

3733 | else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) |

3734 | { |

3735 | /* Recurse through the type conversion, unless it is a narrowing |

3736 | conversion or conversion from non-integral type. */ |

3737 | tree rhs = gimple_assign_rhs1 (op_def); |

3738 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs)) |

3739 | && (TYPE_PRECISION (TREE_TYPE (rhs)) |

3740 | <= TYPE_PRECISION (TREE_TYPE (op)))) |

3741 | register_edge_assert_for_1 (rhs, code, e, asserts); |

3742 | } |

3743 | } |

3744 | |

3745 | /* Check if the comparison |

3746 | NAME COND_OP INTEGER_CST |

3747 | has the form |

3748 | (X & 11...100..0) COND_OP XX...X00...0 |

3749 | Such a comparison can yield assertions like |

3750 | X >= XX...X00...0 |

3751 | X <= XX...X11...1 |

3752 | in case of COND_OP being EQ_EXPR or |

3753 | X < XX...X00...0 |

3754 | X > XX...X11...1 |

3755 | in case of NE_EXPR. */ |
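
| /* For example, for unsigned X, (X & 0xf0) == 0x20 yields X >= 0x20 and |

| X <= 0x2f, while (X & 0xf0) != 0 yields just X > 0x0f, because the |

| other alternative, X < 0, is always false. */ |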

3756 | |

3757 | static bool |

3758 | is_masked_range_test (tree name, tree valt, enum tree_code cond_code, |

3759 | tree *new_name, tree *low, enum tree_code *low_code, |

3760 | tree *high, enum tree_code *high_code) |

3761 | { |

3762 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |

3763 | |

3764 | if (!is_gimple_assign (def_stmt) |

3765 | || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR) |

3766 | return false; |

3767 | |

3768 | tree t = gimple_assign_rhs1 (def_stmt); |

3769 | tree maskt = gimple_assign_rhs2 (def_stmt); |

3770 | if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST) |

3771 | return false; |

3772 | |

3773 | wi::tree_to_wide_ref mask = wi::to_wide (maskt); |

3774 | wide_int inv_mask = ~mask; |

3775 | /* Assume VALT is INTEGER_CST. */ |

3776 | wi::tree_to_wide_ref val = wi::to_wide (valt); |

3777 | |

3778 | if ((inv_mask & (inv_mask + 1)) != 0 |

3779 | || (val & mask) != val) |

3780 | return false; |

3781 | |

3782 | bool is_range = cond_code == EQ_EXPR; |

3783 | |

3784 | tree type = TREE_TYPE (t); |

3785 | wide_int min = wi::min_value (type), |

3786 | max = wi::max_value (type); |

3787 | |

3788 | if (is_range) |

3789 | { |

3790 | *low_code = val == min ? ERROR_MARK : GE_EXPR; |

3791 | *high_code = val == max ? ERROR_MARK : LE_EXPR; |

3792 | } |

3793 | else |

3794 | { |

3795 | /* We can still generate an assertion if one of the alternatives |

3796 | is known to always be false. */ |

3797 | if (val == min) |

3798 | { |

3799 | *low_code = (enum tree_code) 0; |

3800 | *high_code = GT_EXPR; |

3801 | } |

3802 | else if ((val | inv_mask) == max) |

3803 | { |

3804 | *low_code = LT_EXPR; |

3805 | *high_code = (enum tree_code) 0; |

3806 | } |

3807 | else |

3808 | return false; |

3809 | } |

3810 | |

3811 | *new_name = t; |

3812 | *low = wide_int_to_tree (type, val); |

3813 | *high = wide_int_to_tree (type, val | inv_mask); |

3814 | |

3815 | if (wi::neg_p (val, TYPE_SIGN (type))) |

3816 | std::swap (*low, *high); |

3817 | |

3818 | return true; |

3819 | } |

3820 | |

3821 | /* Try to register an edge assertion for SSA name NAME on edge E for |

3822 | the condition COND_OP0 COND_CODE COND_OP1 contributing to the |

3823 | conditional jump at the end of E->src. */ |

3824 | |

3825 | void |

3826 | register_edge_assert_for (tree name, edge e, |

3827 | enum tree_code cond_code, tree cond_op0, |

3828 | tree cond_op1, vec<assert_info> &asserts) |

3829 | { |

3830 | tree val; |

3831 | enum tree_code comp_code; |

3832 | bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; |

3833 | |

3834 | /* Do not attempt to infer anything in names that flow through |

3835 | abnormal edges. */ |

3836 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) |

3837 | return; |

3838 | |

3839 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |

3840 | cond_op0, cond_op1, |

3841 | is_else_edge, |

3842 | &comp_code, &val)) |

3843 | return; |

3844 | |

3845 | /* Register ASSERT_EXPRs for name. */ |

3846 | register_edge_assert_for_2 (name, e, cond_code, cond_op0, |

3847 | cond_op1, is_else_edge, asserts); |

3848 | |

3849 | |

3850 | /* If COND is effectively an equality test of an SSA_NAME against |

3851 | the value zero or one, then we may be able to assert values |

3852 | for SSA_NAMEs which flow into COND. */ |

3853 | |

3854 | /* In the case of NAME == 1 or NAME != 0, if the defining statement of |

3855 | NAME is a BIT_AND_EXPR, we can assert that both operands of the |

3856 | BIT_AND_EXPR have a nonzero value. */ |
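
| /* E.g. for _1 = a_2 & b_3; if (_1 != 0) we can assert a_2 != 0 and |

| b_3 != 0 on the true edge (a_2 and b_3 are illustrative SSA names). */ |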

3857 | if (((comp_code == EQ_EXPR && integer_onep (val)) |

3858 | || (comp_code == NE_EXPR && integer_zerop (val)))) |

3859 | { |

3860 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |

3861 | |

3862 | if (is_gimple_assign (def_stmt) |

3863 | && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) |

3864 | { |

3865 | tree op0 = gimple_assign_rhs1 (def_stmt); |

3866 | tree op1 = gimple_assign_rhs2 (def_stmt); |

3867 | register_edge_assert_for_1 (op0, NE_EXPR, e, asserts); |

3868 | register_edge_assert_for_1 (op1, NE_EXPR, e, asserts); |

3869 | } |

3870 | } |

3871 | |

3872 | /* In the case of NAME == 0 or NAME != 1, if the defining statement of |

3873 | NAME is a BIT_IOR_EXPR, we can assert that both operands of the |

3874 | BIT_IOR_EXPR have a zero value. */ |

3875 | if (((comp_code == EQ_EXPR && integer_zerop (val)) |

3876 | || (comp_code == NE_EXPR && integer_onep (val)))) |

3877 | { |

3878 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |

3879 | |

3880 | /* For a BIT_IOR_EXPR both operands are necessarily zero only if |

3881 | NAME == 0, or if the type precision is one (then NAME != 1 implies NAME == 0). */ |
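
| /* E.g. for _1 = a_2 | b_3; if (_1 == 0) we can assert a_2 == 0 and |

| b_3 == 0 on the true edge (a_2 and b_3 are illustrative SSA names). */ |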

3882 | if (is_gimple_assign (def_stmt) |

3883 | && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR |

3884 | && (TYPE_PRECISION (TREE_TYPE (name)) == 1 |

3885 | || comp_code == EQ_EXPR))) |

3886 | { |

3887 | tree op0 = gimple_assign_rhs1 (def_stmt); |

3888 | tree op1 = gimple_assign_rhs2 (def_stmt); |

3889 | register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts); |

3890 | register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts); |

3891 | } |

3892 | } |

3893 | |

3894 | /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */ |

3895 | if ((comp_code == EQ_EXPR || comp_code == NE_EXPR) |

3896 | && TREE_CODE (val) == INTEGER_CST) |

3897 | { |

3898 | enum tree_code low_code, high_code; |

3899 | tree low, high; |

3900 | if (is_masked_range_test (name, val, comp_code, &name, &low, |

3901 | &low_code, &high, &high_code)) |

3902 | { |

3903 | if (low_code != ERROR_MARK) |

3904 | register_edge_assert_for_2 (name, e, low_code, name, |

3905 | low, /*invert*/false, asserts); |

3906 | if (high_code != ERROR_MARK) |

3907 | register_edge_assert_for_2 (name, e, high_code, name, |

3908 | high, /*invert*/false, asserts); |

3909 | } |

3910 | } |

3911 | } |

3912 | |

3913 | /* Finish found ASSERTS for E and register them at GSI. */ |

3914 | |

3915 | static void |

3916 | finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi, |

3917 | vec<assert_info> &asserts) |

3918 | { |

3919 | for (unsigned i = 0; i < asserts.length (); ++i) |

3920 | /* Only register an ASSERT_EXPR if NAME was found in the sub-graph |

3921 | reachable from E. */ |

3922 | if (live_on_edge (e, asserts[i].name)) |

3923 | register_new_assert_for (asserts[i].name, asserts[i].expr, |

3924 | asserts[i].comp_code, asserts[i].val, |

3925 | NULL, e, gsi); |

3926 | } |

3927 | |

3928 | |

3929 | |

3930 | /* Determine whether the outgoing edges of BB should receive an |

3931 | ASSERT_EXPR for each of the operands of BB's LAST statement. |

3932 | The last statement of BB must be a COND_EXPR. |

3933 | |

3934 | If any of the sub-graphs rooted at BB have an interesting use of |

3935 | the predicate operands, an assert location node is added to the |

3936 | list of assertions for the corresponding operands. */ |

3937 | |

3938 | static void |

3939 | find_conditional_asserts (basic_block bb, gcond *last) |

3940 | { |

3941 | gimple_stmt_iterator bsi; |

3942 | tree op; |

3943 | edge_iterator ei; |

3944 | edge e; |

3945 | ssa_op_iter iter; |

3946 | |

3947 | bsi = gsi_for_stmt (last); |

3948 | |

3949 | /* Look for uses of the operands in each of the sub-graphs |

3950 | rooted at BB. We need to check each of the outgoing edges |

3951 | separately, so that we know what kind of ASSERT_EXPR to |

3952 | insert. */ |

3953 | FOR_EACH_EDGE (e, ei, bb->succs) |

3954 | { |

3955 | if (e->dest == bb) |

3956 | continue; |

3957 | |

3958 | /* Register the necessary assertions for each operand in the |

3959 | conditional predicate. */ |

3960 | auto_vec<assert_info, 8> asserts; |

3961 | FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) |

3962 | register_edge_assert_for (op, e, |

3963 | gimple_cond_code (last), |

3964 | gimple_cond_lhs (last), |

3965 | gimple_cond_rhs (last), asserts); |

3966 | finish_register_edge_assert_for (e, bsi, asserts); |

3967 | } |

3968 | } |

3969 | |

3970 | struct case_info |

3971 | { |

3972 | tree expr; |

3973 | basic_block bb; |

3974 | }; |

3975 | |

3976 | /* Compare two case labels sorting first by the destination bb index |

3977 | and then by the case value. */ |

3978 | |

3979 | static int |

3980 | compare_case_labels (const void *p1, const void *p2) |

3981 | { |

3982 | const struct case_info *ci1 = (const struct case_info *) p1; |

3983 | const struct case_info *ci2 = (const struct case_info *) p2; |

3984 | int idx1 = ci1->bb->index; |

3985 | int idx2 = ci2->bb->index; |

3986 | |

3987 | if (idx1 < idx2) |

3988 | return -1; |

3989 | else if (idx1 == idx2) |

3990 | { |

3991 | /* Make sure the default label is first in a group. */ |

3992 | if (!CASE_LOW (ci1->expr)) |

3993 | return -1; |

3994 | else if (!CASE_LOW (ci2->expr)) |

3995 | return 1; |

3996 | else |

3997 | return tree_int_cst_compare (CASE_LOW (ci1->expr), |

3998 | CASE_LOW (ci2->expr)); |

3999 | } |

4000 | else |

4001 | return 1; |

4002 | } |

4003 | |

4004 | /* Determine whether the outgoing edges of BB should receive an |

4005 | ASSERT_EXPR for each of the operands of BB's LAST statement. |

4006 | The last statement of BB must be a SWITCH_EXPR. |

4007 | |

4008 | If any of the sub-graphs rooted at BB have an interesting use of |

4009 | the predicate operands, an assert location node is added to the |

4010 | list of assertions for the corresponding operands. */ |

4011 | |

4012 | static void |

4013 | find_switch_asserts (basic_block bb, gswitch *last) |

4014 | { |

4015 | gimple_stmt_iterator bsi; |

4016 | tree op; |

4017 | edge e; |

4018 | struct case_info *ci; |

4019 | size_t n = gimple_switch_num_labels (last); |

4020 | #if GCC_VERSION >= 4000 |

4021 | unsigned int idx; |

4022 | #else |

4023 | /* Work around GCC 3.4 bug (PR 37086). */ |

4024 | volatile unsigned int idx; |

4025 | #endif |

4026 | |

4027 | bsi = gsi_for_stmt (last); |

4028 | op = gimple_switch_index (last); |

4029 | if (TREE_CODE (op) != SSA_NAME) |

4030 | return; |

4031 | |

4032 | /* Build a vector of case labels sorted by destination label. */ |

4033 | ci = XNEWVEC (struct case_info, n); |

4034 | for (idx = 0; idx < n; ++idx) |

4035 | { |

4036 | ci[idx].expr = gimple_switch_label (last, idx); |

4037 | ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); |

4038 | } |

4039 | edge default_edge = find_edge (bb, ci[0].bb); |

4040 | qsort (ci, n, sizeof (struct case_info), compare_case_labels); |

4041 | |

4042 | for (idx = 0; idx < n; ++idx) |

4043 | { |

4044 | tree min, max; |

4045 | tree cl = ci[idx].expr; |

4046 | basic_block cbb = ci[idx].bb; |

4047 | |

4048 | min = CASE_LOW (cl); |

4049 | max = CASE_HIGH (cl); |

4050 | |

4051 | /* If there are multiple case labels with the same destination |

4052 | we need to combine them to a single value range for the edge. */ |
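
| /* For example, case 3: case 4: case 5: targeting the same block are |

| combined so that on that edge we assert op >= 3 and op <= 5. */ |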

4053 | if (idx + 1 < n && cbb == ci[idx + 1].bb) |

4054 | { |

4055 | /* Skip labels until the last of the group. */ |

4056 | do { |

4057 | ++idx; |

4058 | } while (idx < n && cbb == ci[idx].bb); |

4059 | --idx; |

4060 | |

4061 | /* Pick up the maximum of the case label range. */ |

4062 | if (CASE_HIGH (ci[idx].expr)) |

4063 | max = CASE_HIGH (ci[idx].expr); |

4064 | else |

4065 | max = CASE_LOW (ci[idx].expr); |

4066 | } |

4067 | |

4068 | /* Can't extract a useful assertion out of a range that includes the |

4069 | default label. */ |

4070 | if (min == NULL_TREE) |

4071 | continue; |

4072 | |

4073 | /* Find the edge to register the assert expr on. */ |

4074 | e = find_edge (bb, cbb); |

4075 | |

4076 | /* Register the necessary assertions for the operand in the |

4077 | SWITCH_EXPR. */ |
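
| /* A single-value label registers op == MIN below; a label range (or a |

| combined group of labels) registers op >= MIN and op <= MAX. */ |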

4078 | auto_vec<assert_info, 8> asserts; |

4079 | register_edge_assert_for (op, e, |

4080 | max ? GE_EXPR : EQ_EXPR, |

4081 | op, fold_convert (TREE_TYPE (op), min), |

4082 | asserts); |

4083 | if (max) |

4084 | register_edge_assert_for (op, e, LE_EXPR, op, |

4085 | fold_convert (TREE_TYPE (op), max), |

4086 | asserts); |

4087 | finish_register_edge_assert_for (e, bsi, asserts); |

4088 | } |

4089 | |

4090 | XDELETEVEC (ci); |

4091 | |

4092 | if (! |