1 | /* Rtl-level induction variable analysis. |
---|---|

2 | Copyright (C) 2004-2017 Free Software Foundation, Inc. |

3 | |

4 | This file is part of GCC. |

5 | |

6 | GCC is free software; you can redistribute it and/or modify it |

7 | under the terms of the GNU General Public License as published by the |

8 | Free Software Foundation; either version 3, or (at your option) any |

9 | later version. |

10 | |

11 | GCC is distributed in the hope that it will be useful, but WITHOUT |

12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |

13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |

14 | for more details. |

15 | |

16 | You should have received a copy of the GNU General Public License |

17 | along with GCC; see the file COPYING3. If not see |

18 | <http://www.gnu.org/licenses/>. */ |

19 | |

20 | /* This is a simple analysis of induction variables of the loop. The major use |

21 | is for determining the number of iterations of a loop for loop unrolling, |

22 | doloop optimization and branch prediction. The iv information is computed |

23 | on demand. |

24 | |

25 | Induction variables are analyzed by walking the use-def chains. When |

26 | a basic induction variable (biv) is found, it is cached in the bivs |

27 | hash table. When a register is proved to be a biv, its description |

28 | is stored to DF_REF_DATA of the def reference. |

29 | |

30 | The analysis works always with one loop -- you must call |

31 | iv_analysis_loop_init (loop) for it. All the other functions then work with |

32 | this loop. When you need to work with another loop, just call |

33 | iv_analysis_loop_init for it. When you no longer need iv analysis, call |

34 | iv_analysis_done () to clean up the memory. |

35 | |

36 | The available functions are: |

37 | |

38 | iv_analyze (insn, mode, reg, iv): Stores the description of the induction |

39 | variable corresponding to the use of register REG in INSN to IV, given |

40 | that REG has mode MODE. Returns true if REG is an induction variable |

41 | in INSN. false otherwise. If a use of REG is not found in INSN, |

42 | the following insns are scanned (so that we may call this function |

43 | on insns returned by get_condition). |

44 | iv_analyze_result (insn, def, iv): Stores to IV the description of the iv |

45 | corresponding to DEF, which is a register defined in INSN. |

46 | iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv |

47 | corresponding to expression EXPR evaluated at INSN. All registers used by |

48 | EXPR must also be used in INSN. MODE is the mode of EXPR. |

49 | */ |

50 | |

51 | #include "config.h" |

52 | #include "system.h" |

53 | #include "coretypes.h" |

54 | #include "backend.h" |

55 | #include "rtl.h" |

56 | #include "df.h" |

57 | #include "memmodel.h" |

58 | #include "emit-rtl.h" |

59 | #include "diagnostic-core.h" |

60 | #include "cfgloop.h" |

61 | #include "intl.h" |

62 | #include "dumpfile.h" |

63 | #include "rtl-iter.h" |

64 | |

/* Possible return values of iv_get_reaching_def; they classify how the
   single use of a register in an insn is reached by definitions.  */

enum iv_grd_result
{
  /* More than one reaching def, or reaching def that does not
     dominate the use.  */
  GRD_INVALID,

  /* The use is trivial invariant of the loop, i.e. is not changed
     inside the loop.  */
  GRD_INVARIANT,

  /* The use is reached by initial value and a value from the
     previous iteration.  */
  GRD_MAYBE_BIV,

  /* The use has single dominating def.  */
  GRD_SINGLE_DOM
};

84 | |

/* Information about a biv (basic induction variable), cached in the
   BIVS hash table keyed by register number.  */

struct biv_entry
{
  unsigned regno;	/* The register of the biv.  */
  struct rtx_iv iv;	/* Value of the biv.  */
};

92 | |

/* True until iv_analysis_loop_init runs for the first time; the df
   flags and the BIVS hash table are set up lazily on that first call.  */
static bool clean_slate = true;

/* Number of slots currently allocated in iv_ref_table.  */
static unsigned int iv_ref_table_size = 0;

/* Table of rtx_ivs indexed by the df_ref uid field.  */
static struct rtx_iv ** iv_ref_table;

/* Induction variable stored at the reference.  */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)

/* The current loop, set by iv_analysis_loop_init.  */

static struct loop *current_loop;

107 | |

/* Hashtable helper.  Entries are keyed by register number and looked
   up directly by the register rtx (the compare_type).  */

struct biv_entry_hasher : free_ptr_hash <biv_entry>
{
  typedef rtx_def *compare_type;
  static inline hashval_t hash (const biv_entry *);
  static inline bool equal (const biv_entry *, const rtx_def *);
};

116 | |

117 | /* Returns hash value for biv B. */ |

118 | |

119 | inline hashval_t |

120 | biv_entry_hasher::hash (const biv_entry *b) |

121 | { |

122 | return b->regno; |

123 | } |

124 | |

125 | /* Compares biv B and register R. */ |

126 | |

127 | inline bool |

128 | biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r) |

129 | { |

130 | return b->regno == REGNO (r); |

131 | } |

132 | |

/* Bivs of the current loop.  */

static hash_table<biv_entry_hasher> *bivs;

/* Forward declaration; defined later in this file.  */
static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, struct rtx_iv *);

138 | |

139 | /* Return the RTX code corresponding to the IV extend code EXTEND. */ |

140 | static inline enum rtx_code |

141 | iv_extend_to_rtx_code (enum iv_extend_code extend) |

142 | { |

143 | switch (extend) |

144 | { |

145 | case IV_SIGN_EXTEND: |

146 | return SIGN_EXTEND; |

147 | case IV_ZERO_EXTEND: |

148 | return ZERO_EXTEND; |

149 | case IV_UNKNOWN_EXTEND: |

150 | return UNKNOWN; |

151 | } |

152 | gcc_unreachable (); |

153 | } |

154 | |

/* Dumps information about IV to FILE.  Prints the base, the per-iteration
   step, the mode, any extension, and the outer multiplier/offset; a null
   base marks an iv that could not be analyzed.  */

extern void dump_iv_info (FILE *, struct rtx_iv *);
void
dump_iv_info (FILE *file, struct rtx_iv *iv)
{
  if (!iv->base)
    {
      fprintf (file, "not simple");
      return;
    }

  /* Step zero (and no first_special marker) means loop-invariant.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    fprintf (file, "invariant ");

  print_rtl (file, iv->base);
  if (iv->step != const0_rtx)
    {
      fprintf (file, " + ");
      print_rtl (file, iv->step);
      fprintf (file, " * iteration");
    }
  fprintf (file, " (in %s)", GET_MODE_NAME (iv->mode));

  /* Extension from the inner mode to the mode of evaluation.  */
  if (iv->mode != iv->extend_mode)
    fprintf (file, " %s to %s",
	     rtx_name[iv_extend_to_rtx_code (iv->extend)],
	     GET_MODE_NAME (iv->extend_mode));

  /* Outer multiplier and offset applied after the extension.  */
  if (iv->mult != const1_rtx)
    {
      fprintf (file, " * ");
      print_rtl (file, iv->mult);
    }
  if (iv->delta != const0_rtx)
    {
      fprintf (file, " + ");
      print_rtl (file, iv->delta);
    }
  if (iv->first_special)
    fprintf (file, " (first special)");
}

198 | |

199 | static void |

200 | check_iv_ref_table_size (void) |

201 | { |

202 | if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ()) |

203 | { |

204 | unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4); |

205 | iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size); |

206 | memset (&iv_ref_table[iv_ref_table_size], 0, |

207 | (new_size - iv_ref_table_size) * sizeof (struct rtx_iv *)); |

208 | iv_ref_table_size = new_size; |

209 | } |

210 | } |

211 | |

212 | |

213 | /* Checks whether REG is a well-behaved register. */ |

214 | |

215 | static bool |

216 | simple_reg_p (rtx reg) |

217 | { |

218 | unsigned r; |

219 | |

220 | if (GET_CODE (reg) == SUBREG) |

221 | { |

222 | if (!subreg_lowpart_p (reg)) |

223 | return false; |

224 | reg = SUBREG_REG (reg); |

225 | } |

226 | |

227 | if (!REG_P (reg)) |

228 | return false; |

229 | |

230 | r = REGNO (reg); |

231 | if (HARD_REGISTER_NUM_P (r)) |

232 | return false; |

233 | |

234 | if (GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT) |

235 | return false; |

236 | |

237 | return true; |

238 | } |

239 | |

240 | /* Clears the information about ivs stored in df. */ |

241 | |

242 | static void |

243 | clear_iv_info (void) |

244 | { |

245 | unsigned i, n_defs = DF_DEFS_TABLE_SIZE (); |

246 | struct rtx_iv *iv; |

247 | |

248 | check_iv_ref_table_size (); |

249 | for (i = 0; i < n_defs; i++) |

250 | { |

251 | iv = iv_ref_table[i]; |

252 | if (iv) |

253 | { |

254 | free (iv); |

255 | iv_ref_table[i] = NULL; |

256 | } |

257 | } |

258 | |

259 | bivs->empty (); |

260 | } |

261 | |

262 | |

/* Prepare the data for an induction variable analysis of a LOOP.  All
   other iv_* entry points operate on the loop set here (current_loop).  */

void
iv_analysis_loop_init (struct loop *loop)
{
  current_loop = loop;

  /* Clear the information from the analysis of the previous loop.  */
  if (clean_slate)
    {
      /* First use ever: configure df and allocate the biv cache.  */
      df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
      bivs = new hash_table<biv_entry_hasher> (10);
      clean_slate = false;
    }
  else
    clear_iv_info ();

  /* Get rid of the ud chains before processing the rescans.  Then add
     the problem back.  */
  df_remove_problem (df_chain);
  df_process_deferred_rescans ();
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_UD_CHAIN);
  df_note_add_problem ();
  df_analyze_loop (loop);
  if (dump_file)
    df_dump_region (dump_file);

  /* The df defs table may have grown; keep iv_ref_table in sync.  */
  check_iv_ref_table_size ();
}

293 | |

294 | /* Finds the definition of REG that dominates loop latch and stores |

295 | it to DEF. Returns false if there is not a single definition |

296 | dominating the latch. If REG has no definition in loop, DEF |

297 | is set to NULL and true is returned. */ |

298 | |

299 | static bool |

300 | latch_dominating_def (rtx reg, df_ref *def) |

301 | { |

302 | df_ref single_rd = NULL, adef; |

303 | unsigned regno = REGNO (reg); |

304 | struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch); |

305 | |

306 | for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef)) |

307 | { |

308 | if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef)) |

309 | || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef))) |

310 | continue; |

311 | |

312 | /* More than one reaching definition. */ |

313 | if (single_rd) |

314 | return false; |

315 | |

316 | if (!just_once_each_iteration_p (current_loop, DF_REF_BB (adef))) |

317 | return false; |

318 | |

319 | single_rd = adef; |

320 | } |

321 | |

322 | *def = single_rd; |

323 | return true; |

324 | } |

325 | |

/* Gets definition of REG reaching its use in INSN and stores it to DEF.
   Returns one of the iv_grd_result codes; *DEF is set (non-NULL) only
   for GRD_SINGLE_DOM.  INSN must contain a use of REG (asserted).  */

static enum iv_grd_result
iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
{
  df_ref use, adef;
  basic_block def_bb, use_bb;
  rtx_insn *def_insn;
  bool dom_p;

  *def = NULL;
  if (!simple_reg_p (reg))
    return GRD_INVALID;
  /* Strip a (lowpart) subreg; simple_reg_p already validated it.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  use = df_find_use (insn, reg);
  gcc_assert (use != NULL);

  /* No def chain: REG is not changed inside the loop.  */
  if (!DF_REF_CHAIN (use))
    return GRD_INVARIANT;

  /* More than one reaching def.  */
  if (DF_REF_CHAIN (use)->next)
    return GRD_INVALID;

  adef = DF_REF_CHAIN (use)->ref;

  /* We do not handle setting only part of the register.  */
  if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)
    return GRD_INVALID;

  def_insn = DF_REF_INSN (adef);
  def_bb = DF_REF_BB (adef);
  use_bb = BLOCK_FOR_INSN (insn);

  /* Within one block insn order decides dominance; across blocks use
     the dominator tree.  */
  if (use_bb == def_bb)
    dom_p = (DF_INSN_LUID (def_insn) < DF_INSN_LUID (insn));
  else
    dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);

  if (dom_p)
    {
      *def = adef;
      return GRD_SINGLE_DOM;
    }

  /* The definition does not dominate the use.  This is still OK if
     this may be a use of a biv, i.e. if the def_bb dominates loop
     latch.  */
  if (just_once_each_iteration_p (current_loop, def_bb))
    return GRD_MAYBE_BIV;

  return GRD_INVALID;
}

382 | |

383 | /* Sets IV to invariant CST in MODE. Always returns true (just for |

384 | consistency with other iv manipulation functions that may fail). */ |

385 | |

386 | static bool |

387 | iv_constant (struct rtx_iv *iv, scalar_int_mode mode, rtx cst) |

388 | { |

389 | iv->mode = mode; |

390 | iv->base = cst; |

391 | iv->step = const0_rtx; |

392 | iv->first_special = false; |

393 | iv->extend = IV_UNKNOWN_EXTEND; |

394 | iv->extend_mode = iv->mode; |

395 | iv->delta = const0_rtx; |

396 | iv->mult = const1_rtx; |

397 | |

398 | return true; |

399 | } |

400 | |

/* Evaluates application of subreg to MODE on IV, i.e. narrows the iv to
   the lowpart in MODE.  Returns false if the result cannot be
   represented.  */

static bool
iv_subreg (struct rtx_iv *iv, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      val = lowpart_subreg (mode, val,
			    iv->extend == IV_UNKNOWN_EXTEND
			    ? iv->mode : iv->extend_mode);

      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  /* A subreg to the full evaluation mode is a no-op.  */
  if (iv->extend_mode == mode)
    return true;

  /* Only a lowpart no wider than the current inner mode can be taken.  */
  if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode))
    return false;

  iv->extend = IV_UNKNOWN_EXTEND;
  iv->mode = mode;

  /* Fold the outer delta and mult into base and step, leaving only the
     subreg/extend structure.  */
  iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
				  simplify_gen_binary (MULT, iv->extend_mode,
						       iv->base, iv->mult));
  iv->step = simplify_gen_binary (MULT, iv->extend_mode, iv->step, iv->mult);
  iv->mult = const1_rtx;
  iv->delta = const0_rtx;
  iv->first_special = false;

  return true;
}

442 | |

/* Evaluates application of EXTEND to MODE on IV.  Returns false when
   the extension cannot be composed with the iv's existing extension.  */

static bool
iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      /* A conflicting earlier extension must be stripped down to the
	 inner mode before the new one is applied.  */
      if (iv->extend_mode != iv->mode
	  && iv->extend != IV_UNKNOWN_EXTEND
	  && iv->extend != extend)
	val = lowpart_subreg (iv->mode, val, iv->extend_mode);
      val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
				val,
				iv->extend == extend
				? iv->extend_mode : iv->mode);
      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  /* For a non-invariant iv, the extension must target the mode the iv
     is already evaluated in.  */
  if (mode != iv->extend_mode)
    return false;

  /* Two different extensions cannot be combined.  */
  if (iv->extend != IV_UNKNOWN_EXTEND
      && iv->extend != extend)
    return false;

  iv->extend = extend;

  return true;
}

480 | |

481 | /* Evaluates negation of IV. */ |

482 | |

483 | static bool |

484 | iv_neg (struct rtx_iv *iv) |

485 | { |

486 | if (iv->extend == IV_UNKNOWN_EXTEND) |

487 | { |

488 | iv->base = simplify_gen_unary (NEG, iv->extend_mode, |

489 | iv->base, iv->extend_mode); |

490 | iv->step = simplify_gen_unary (NEG, iv->extend_mode, |

491 | iv->step, iv->extend_mode); |

492 | } |

493 | else |

494 | { |

495 | iv->delta = simplify_gen_unary (NEG, iv->extend_mode, |

496 | iv->delta, iv->extend_mode); |

497 | iv->mult = simplify_gen_unary (NEG, iv->extend_mode, |

498 | iv->mult, iv->extend_mode); |

499 | } |

500 | |

501 | return true; |

502 | } |

503 | |

/* Evaluates addition or subtraction (according to OP) of IV1 to IV0.
   The result is stored into *IV0; returns false when the combination
   cannot be represented as a single iv.  */

static bool
iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op)
{
  scalar_int_mode mode;
  rtx arg;

  /* Extend the constant to extend_mode of the other operand if necessary.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode))
    {
      iv0->extend_mode = iv1->extend_mode;
      iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode,
				      iv0->base, iv0->mode);
    }
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode))
    {
      iv1->extend_mode = iv0->extend_mode;
      iv1->base = simplify_gen_unary (ZERO_EXTEND, iv1->extend_mode,
				      iv1->base, iv1->mode);
    }

  mode = iv0->extend_mode;
  if (mode != iv1->extend_mode)
    return false;

  /* Neither iv involves an extension: combine bases and steps directly.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv1->extend == IV_UNKNOWN_EXTEND)
    {
      if (iv0->mode != iv1->mode)
	return false;

      iv0->base = simplify_gen_binary (op, mode, iv0->base, iv1->base);
      iv0->step = simplify_gen_binary (op, mode, iv0->step, iv1->step);

      return true;
    }

  /* Handle addition of constant.  */
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == mode
      && iv1->step == const0_rtx)
    {
      iv0->delta = simplify_gen_binary (op, mode, iv0->delta, iv1->base);
      return true;
    }

  /* IV0 is the invariant one: swap roles (negating IV1 for MINUS) and
     fold IV0's value into the outer delta.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == mode
      && iv0->step == const0_rtx)
    {
      arg = iv0->base;
      *iv0 = *iv1;
      if (op == MINUS
	  && !iv_neg (iv0))
	return false;

      iv0->delta = simplify_gen_binary (PLUS, mode, iv0->delta, arg);
      return true;
    }

  return false;
}

573 | |

574 | /* Evaluates multiplication of IV by constant CST. */ |

575 | |

576 | static bool |

577 | iv_mult (struct rtx_iv *iv, rtx mby) |

578 | { |

579 | scalar_int_mode mode = iv->extend_mode; |

580 | |

581 | if (GET_MODE (mby) != VOIDmode |

582 | && GET_MODE (mby) != mode) |

583 | return false; |

584 | |

585 | if (iv->extend == IV_UNKNOWN_EXTEND) |

586 | { |

587 | iv->base = simplify_gen_binary (MULT, mode, iv->base, mby); |

588 | iv->step = simplify_gen_binary (MULT, mode, iv->step, mby); |

589 | } |

590 | else |

591 | { |

592 | iv->delta = simplify_gen_binary (MULT, mode, iv->delta, mby); |

593 | iv->mult = simplify_gen_binary (MULT, mode, iv->mult, mby); |

594 | } |

595 | |

596 | return true; |

597 | } |

598 | |

599 | /* Evaluates shift of IV by constant CST. */ |

600 | |

601 | static bool |

602 | iv_shift (struct rtx_iv *iv, rtx mby) |

603 | { |

604 | scalar_int_mode mode = iv->extend_mode; |

605 | |

606 | if (GET_MODE (mby) != VOIDmode |

607 | && GET_MODE (mby) != mode) |

608 | return false; |

609 | |

610 | if (iv->extend == IV_UNKNOWN_EXTEND) |

611 | { |

612 | iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby); |

613 | iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby); |

614 | } |

615 | else |

616 | { |

617 | iv->delta = simplify_gen_binary (ASHIFT, mode, iv->delta, mby); |

618 | iv->mult = simplify_gen_binary (ASHIFT, mode, iv->mult, mby); |

619 | } |

620 | |

621 | return true; |

622 | } |

623 | |

/* The recursive part of get_biv_step.  Gets the value of the single value
   defined by DEF wrto initial value of REG inside loop, in shape described
   at get_biv_step.  Walks the use-def chain backwards from DEF until it
   reaches REG itself (a GRD_MAYBE_BIV use), accumulating the inner/outer
   steps and the extension on the way back up.  */

static bool
get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
		rtx *inner_step, scalar_int_mode *inner_mode,
		enum iv_extend_code *extend,
		rtx *outer_step)
{
  rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
  rtx next, nextr;
  enum rtx_code code;
  rtx_insn *insn = DF_REF_INSN (def);
  df_ref next_def;
  enum iv_grd_result res;

  set = single_set (insn);
  if (!set)
    return false;

  /* Prefer a REG_EQUAL/REG_EQUIV note over the raw SET_SRC.  */
  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  /* Phase 1: classify the rhs and pick NEXT, the operand whose
     definition we follow next.  */
  code = GET_CODE (rhs);
  switch (code)
    {
    case SUBREG:
    case REG:
      next = rhs;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);

      /* Canonicalize so the constant ends up in OP1.  */
      if (code == PLUS && CONSTANT_P (op0))
	std::swap (op0, op1);

      if (!simple_reg_p (op0)
	  || !CONSTANT_P (op1))
	return false;

      if (GET_MODE (rhs) != outer_mode)
	{
	  /* ppc64 uses expressions like

	     (set x:SI (plus:SI (subreg:SI y:DI) 1)).

	     this is equivalent to

	     (set x':DI (plus:DI y:DI 1))
	     (set x:SI (subreg:SI (x':DI)).  */
	  if (GET_CODE (op0) != SUBREG)
	    return false;
	  if (GET_MODE (SUBREG_REG (op0)) != outer_mode)
	    return false;
	}

      next = op0;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_MODE (rhs) != outer_mode)
	return false;

      op0 = XEXP (rhs, 0);
      if (!simple_reg_p (op0))
	return false;

      next = op0;
      break;

    default:
      return false;
    }

  /* Phase 2: strip a lowpart subreg from NEXT to get the register
     NEXTR whose reaching definition we look up.  */
  if (GET_CODE (next) == SUBREG)
    {
      if (!subreg_lowpart_p (next))
	return false;

      nextr = SUBREG_REG (next);
      if (GET_MODE (nextr) != outer_mode)
	return false;
    }
  else
    nextr = next;

  res = iv_get_reaching_def (insn, nextr, &next_def);

  if (res == GRD_INVALID || res == GRD_INVARIANT)
    return false;

  if (res == GRD_MAYBE_BIV)
    {
      /* Recursion bottoms out: we are back at REG's value from the
	 previous iteration.  */
      if (!rtx_equal_p (nextr, reg))
	return false;

      *inner_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
      *inner_mode = outer_mode;
      *outer_step = const0_rtx;
    }
  else if (!get_biv_step_1 (next_def, outer_mode, reg,
			    inner_step, inner_mode, extend,
			    outer_step))
    return false;

  /* Phase 3: a subreg on the way narrows the inner mode and folds any
     outer step back into the inner one.  */
  if (GET_CODE (next) == SUBREG)
    {
      scalar_int_mode amode;
      if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
	  || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
	return false;

      *inner_mode = amode;
      *inner_step = simplify_gen_binary (PLUS, outer_mode,
					 *inner_step, *outer_step);
      *outer_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
    }

  /* Phase 4: account for this insn's own operation.  */
  switch (code)
    {
    case REG:
    case SUBREG:
      break;

    case PLUS:
    case MINUS:
      /* A step applied before the extension goes to INNER_STEP,
	 after it to OUTER_STEP.  */
      if (*inner_mode == outer_mode
	  /* See comment in previous switch.  */
	  || GET_MODE (rhs) != outer_mode)
	*inner_step = simplify_gen_binary (code, outer_mode,
					   *inner_step, op1);
      else
	*outer_step = simplify_gen_binary (code, outer_mode,
					   *outer_step, op1);
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      gcc_assert (GET_MODE (op0) == *inner_mode
		  && *extend == IV_UNKNOWN_EXTEND
		  && *outer_step == const0_rtx);

      *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
      break;

    default:
      return false;
    }

  return true;
}

785 | |

786 | /* Gets the operation on register REG inside loop, in shape |

787 | |

788 | OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP)) |

789 | |

790 | If the operation cannot be described in this shape, return false. |

791 | LAST_DEF is the definition of REG that dominates loop latch. */ |

792 | |

793 | static bool |

794 | get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg, |

795 | rtx *inner_step, scalar_int_mode *inner_mode, |

796 | enum iv_extend_code *extend, rtx *outer_step) |

797 | { |

798 | if (!get_biv_step_1 (last_def, outer_mode, reg, |

799 | inner_step, inner_mode, extend, |

800 | outer_step)) |

801 | return false; |

802 | |

803 | gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND)); |

804 | gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx); |

805 | |

806 | return true; |

807 | } |

808 | |

809 | /* Records information that DEF is induction variable IV. */ |

810 | |

811 | static void |

812 | record_iv (df_ref def, struct rtx_iv *iv) |

813 | { |

814 | struct rtx_iv *recorded_iv = XNEW (struct rtx_iv); |

815 | |

816 | *recorded_iv = *iv; |

817 | check_iv_ref_table_size (); |

818 | DF_REF_IV_SET (def, recorded_iv); |

819 | } |

820 | |

821 | /* If DEF was already analyzed for bivness, store the description of the biv to |

822 | IV and return true. Otherwise return false. */ |

823 | |

824 | static bool |

825 | analyzed_for_bivness_p (rtx def, struct rtx_iv *iv) |

826 | { |

827 | struct biv_entry *biv = bivs->find_with_hash (def, REGNO (def)); |

828 | |

829 | if (!biv) |

830 | return false; |

831 | |

832 | *iv = biv->iv; |

833 | return true; |

834 | } |

835 | |

836 | static void |

837 | record_biv (rtx def, struct rtx_iv *iv) |

838 | { |

839 | struct biv_entry *biv = XNEW (struct biv_entry); |

840 | biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT); |

841 | |

842 | biv->regno = REGNO (def); |

843 | biv->iv = *iv; |

844 | gcc_assert (!*slot); |

845 | *slot = biv; |

846 | } |

847 | |

/* Determines whether DEF is a biv and if so, stores its description
   to *IV.  OUTER_MODE is the mode of DEF.  The outcome -- including a
   failed analysis, marked by a null base -- is cached in the BIVS hash
   table.  */

static bool
iv_analyze_biv (scalar_int_mode outer_mode, rtx def, struct rtx_iv *iv)
{
  rtx inner_step, outer_step;
  scalar_int_mode inner_mode;
  enum iv_extend_code extend;
  df_ref last_def;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing ");
      print_rtl (dump_file, def);
      fprintf (dump_file, " for bivness.\n");
    }

  /* Constants are trivially invariant.  */
  if (!REG_P (def))
    {
      if (!CONSTANT_P (def))
	return false;

      return iv_constant (iv, outer_mode, def);
    }

  if (!latch_dominating_def (def, &last_def))
    {
      if (dump_file)
	fprintf (dump_file, " not simple.\n");
      return false;
    }

  /* No definition inside the loop: DEF is loop-invariant.  */
  if (!last_def)
    return iv_constant (iv, outer_mode, def);

  if (analyzed_for_bivness_p (def, iv))
    {
      if (dump_file)
	fprintf (dump_file, " already analysed.\n");
      return iv->base != NULL_RTX;
    }

  if (!get_biv_step (last_def, outer_mode, def, &inner_step, &inner_mode,
		     &extend, &outer_step))
    {
      /* Record the failure (null base) so it is not re-analyzed.  */
      iv->base = NULL_RTX;
      goto end;
    }

  /* Loop transforms base to es (base + inner_step) + outer_step,
     where es means extend of subreg between inner_mode and outer_mode.
     The corresponding induction variable is

     es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step  */

  iv->base = simplify_gen_binary (MINUS, outer_mode, def, outer_step);
  iv->step = simplify_gen_binary (PLUS, outer_mode, inner_step, outer_step);
  iv->mode = inner_mode;
  iv->extend_mode = outer_mode;
  iv->extend = extend;
  iv->mult = const1_rtx;
  iv->delta = outer_step;
  iv->first_special = inner_mode != outer_mode;

 end:
  if (dump_file)
    {
      fprintf (dump_file, " ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");
    }

  /* Cache the outcome, successful or not.  */
  record_biv (def, iv);
  return iv->base != NULL_RTX;
}

924 | |

/* Analyzes expression RHS used at INSN and stores the result to *IV.
   The mode of the induction variable is MODE.  Recurses over the
   operands of RHS; returns false (leaving iv->base null) when RHS is
   not a supported expression shape.  */

bool
iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
		 struct rtx_iv *iv)
{
  rtx mby = NULL_RTX;
  rtx op0 = NULL_RTX, op1 = NULL_RTX;
  struct rtx_iv iv0, iv1;
  enum rtx_code code = GET_CODE (rhs);
  /* Mode in which the operands are analyzed; may be narrower than MODE
     for extensions.  */
  scalar_int_mode omode = mode;

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  gcc_assert (GET_MODE (rhs) == mode || GET_MODE (rhs) == VOIDmode);

  /* Leaves (constants, registers, subregs) go through iv_analyze_op.  */
  if (CONSTANT_P (rhs)
      || REG_P (rhs)
      || code == SUBREG)
    return iv_analyze_op (insn, mode, rhs, iv);

  /* Decompose RHS into its operands.  */
  switch (code)
    {
    case REG:
      /* NOTE(review): this case looks unreachable -- REG_P (rhs) was
	 already handled by the early return above.  */
      op0 = rhs;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case NEG:
      op0 = XEXP (rhs, 0);
      /* We don't know how many bits there are in a sign-extended constant.  */
      if (!is_a <scalar_int_mode> (GET_MODE (op0), &omode))
	return false;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      break;

    case MULT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      /* Canonicalize so the constant multiplier ends up in MBY.  */
      if (!CONSTANT_P (mby))
	std::swap (op0, mby);
      if (!CONSTANT_P (mby))
	return false;
      break;

    case ASHIFT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      if (!CONSTANT_P (mby))
	return false;
      break;

    default:
      return false;
    }

  /* Analyze the operands recursively.  */
  if (op0
      && !iv_analyze_expr (insn, omode, op0, &iv0))
    return false;

  if (op1
      && !iv_analyze_expr (insn, omode, op1, &iv1))
    return false;

  /* Combine the operand ivs according to the operation.  */
  switch (code)
    {
    case SIGN_EXTEND:
      if (!iv_extend (&iv0, IV_SIGN_EXTEND, mode))
	return false;
      break;

    case ZERO_EXTEND:
      if (!iv_extend (&iv0, IV_ZERO_EXTEND, mode))
	return false;
      break;

    case NEG:
      if (!iv_neg (&iv0))
	return false;
      break;

    case PLUS:
    case MINUS:
      if (!iv_add (&iv0, &iv1, code))
	return false;
      break;

    case MULT:
      if (!iv_mult (&iv0, mby))
	return false;
      break;

    case ASHIFT:
      if (!iv_shift (&iv0, mby))
	return false;
      break;

    default:
      break;
    }

  *iv = iv0;
  return iv->base != NULL_RTX;
}

1037 | |

/* Analyzes iv DEF and stores the result to *IV.  The outcome -- even a
   failed analysis, marked by a null base -- is cached in iv_ref_table
   via record_iv.  */

static bool
iv_analyze_def (df_ref def, struct rtx_iv *iv)
{
  rtx_insn *insn = DF_REF_INSN (def);
  rtx reg = DF_REF_REG (def);
  rtx set, rhs;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing def of ");
      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);
    }

  /* Return the cached result if DEF was analyzed before.  */
  check_iv_ref_table_size ();
  if (DF_REF_IV (def))
    {
      if (dump_file)
	fprintf (dump_file, " already analysed.\n");
      *iv = *DF_REF_IV (def);
      return iv->base != NULL_RTX;
    }

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  /* Only full registers of scalar integer mode can be ivs.  */
  scalar_int_mode mode;
  if (!REG_P (reg) || !is_a <scalar_int_mode> (GET_MODE (reg), &mode))
    return false;

  set = single_set (insn);
  if (!set)
    return false;

  if (!REG_P (SET_DEST (set)))
    return false;

  gcc_assert (SET_DEST (set) == reg);
  /* Prefer a REG_EQUAL/REG_EQUIV note over the raw SET_SRC.  */
  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  iv_analyze_expr (insn, mode, rhs, iv);
  record_iv (def, iv);

  if (dump_file)
    {
      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);
      fprintf (dump_file, " is ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");
    }

  return iv->base != NULL_RTX;
}

1100 | |

/* Analyzes operand OP of INSN and stores the result to *IV.  MODE is the
   mode of OP.  Returns true if OP is either loop-invariant or a valid
   induction variable.  */

static bool
iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, struct rtx_iv *iv)
{
  df_ref def = NULL;
  enum iv_grd_result res;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing operand ");
      print_rtl (dump_file, op);
      fprintf (dump_file, " of insn ");
      print_rtl_single (dump_file, insn);
    }

  if (function_invariant_p (op))
    res = GRD_INVARIANT;
  else if (GET_CODE (op) == SUBREG)
    {
      /* Only lowpart subregs of scalar integers are handled: analyze the
	 inner register recursively, then narrow the result.  */
      scalar_int_mode inner_mode;
      if (!subreg_lowpart_p (op)
	  || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &inner_mode))
	return false;

      if (!iv_analyze_op (insn, inner_mode, SUBREG_REG (op), iv))
	return false;

      return iv_subreg (iv, mode);
    }
  else
    {
      /* Classify OP via its reaching definition.  */
      res = iv_get_reaching_def (insn, op, &def);
      if (res == GRD_INVALID)
	{
	  if (dump_file)
	    fprintf (dump_file, "  not simple.\n");
	  return false;
	}
    }

  if (res == GRD_INVARIANT)
    {
      /* Invariant operands become degenerate ivs with step 0.  */
      iv_constant (iv, mode, op);

      if (dump_file)
	{
	  fprintf (dump_file, "  ");
	  dump_iv_info (dump_file, iv);
	  fprintf (dump_file, "\n");
	}
      return true;
    }

  if (res == GRD_MAYBE_BIV)
    return iv_analyze_biv (mode, op, iv);

  return iv_analyze_def (def, iv);
}

1161 | |

/* Analyzes value VAL at INSN and stores the result to *IV.  MODE is the
   mode of VAL.  Returns true if VAL is invariant or a valid induction
   variable.  */

bool
iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, struct rtx_iv *iv)
{
  rtx reg;

  /* We must find the insn in that val is used, so that we get to UD chains.
     Since the function is sometimes called on result of get_condition,
     this does not necessarily have to be directly INSN; scan also the
     following insns.  */
  if (simple_reg_p (val))
    {
      if (GET_CODE (val) == SUBREG)
	reg = SUBREG_REG (val);
      else
	reg = val;

      /* NOTE(review): this walk assumes some insn at or after INSN does
	 use REG -- otherwise it would run off the insn chain; confirm
	 against callers.  */
      while (!df_find_use (insn, reg))
	insn = NEXT_INSN (insn);
    }

  return iv_analyze_op (insn, mode, val, iv);
}

1187 | |

1188 | /* Analyzes definition of DEF in INSN and stores the result to IV. */ |

1189 | |

1190 | bool |

1191 | iv_analyze_result (rtx_insn *insn, rtx def, struct rtx_iv *iv) |

1192 | { |

1193 | df_ref adef; |

1194 | |

1195 | adef = df_find_def (insn, def); |

1196 | if (!adef) |

1197 | return false; |

1198 | |

1199 | return iv_analyze_def (adef, iv); |

1200 | } |

1201 | |

1202 | /* Checks whether definition of register REG in INSN is a basic induction |

1203 | variable. MODE is the mode of REG. |

1204 | |

1205 | IV analysis must have been initialized (via a call to |

1206 | iv_analysis_loop_init) for this function to produce a result. */ |

1207 | |

1208 | bool |

1209 | biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg) |

1210 | { |

1211 | struct rtx_iv iv; |

1212 | df_ref def, last_def; |

1213 | |

1214 | if (!simple_reg_p (reg)) |

1215 | return false; |

1216 | |

1217 | def = df_find_def (insn, reg); |

1218 | gcc_assert (def != NULL); |

1219 | if (!latch_dominating_def (reg, &last_def)) |

1220 | return false; |

1221 | if (last_def != def) |

1222 | return false; |

1223 | |

1224 | if (!iv_analyze_biv (mode, reg, &iv)) |

1225 | return false; |

1226 | |

1227 | return iv.step != const0_rtx; |

1228 | } |

1229 | |

/* Calculates value of IV at ITERATION-th iteration.  The returned rtx is
   expressed in IV's extend_mode.  */

rtx
get_iv_value (struct rtx_iv *iv, rtx iteration)
{
  rtx val;

  /* We would need to generate some if_then_else patterns, and so far
     it is not needed anywhere.  */
  gcc_assert (!iv->first_special);

  /* base + step * iteration, computed in the wide (extend) mode.  */
  if (iv->step != const0_rtx && iteration != const0_rtx)
    val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base,
			       simplify_gen_binary (MULT, iv->extend_mode,
						    iv->step, iteration));
  else
    val = iv->base;

  /* No narrowing or extension to model -- the value is already final.  */
  if (iv->extend_mode == iv->mode)
    return val;

  /* Narrow to the iv's own mode ...  */
  val = lowpart_subreg (iv->mode, val, iv->extend_mode);

  if (iv->extend == IV_UNKNOWN_EXTEND)
    return val;

  /* ... then re-apply the recorded extension and the outer linear
     transformation delta + mult * extend (val).  */
  val = simplify_gen_unary (iv_extend_to_rtx_code (iv->extend),
			    iv->extend_mode, val, iv->mode);
  val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
			     simplify_gen_binary (MULT, iv->extend_mode,
						  iv->mult, val));

  return val;
}

1264 | |

1265 | /* Free the data for an induction variable analysis. */ |

1266 | |

1267 | void |

1268 | iv_analysis_done (void) |

1269 | { |

1270 | if (!clean_slate) |

1271 | { |

1272 | clear_iv_info (); |

1273 | clean_slate = true; |

1274 | df_finish_pass (true); |

1275 | delete bivs; |

1276 | bivs = NULL; |

1277 | free (iv_ref_table); |

1278 | iv_ref_table = NULL; |

1279 | iv_ref_table_size = 0; |

1280 | } |

1281 | } |

1282 | |

/* Computes the multiplicative inverse of X modulo (1 << MOD); X must be
   odd for the inverse to exist.  */

static uint64_t
inverse (uint64_t x, int mod)
{
  /* Mask selecting the low MOD bits, written as two shifts so that
     MOD == 64 does not shift by the full word width.  */
  uint64_t mask = ((uint64_t) 1 << (mod - 1) << 1) - 1;
  uint64_t result = 1;

  /* For odd X, x**(2**(mod-1) - 1) == x**-1 (mod 2**mod).  Compute it by
     square-and-multiply, folding in x**(2**i) for 0 <= i < mod - 1.  */
  for (int i = 0; i < mod - 1; i++)
    {
      result = (result * x) & mask;
      x = (x * x) & mask;
    }

  return result;
}

1301 | |

1302 | /* Checks whether any register in X is in set ALT. */ |

1303 | |

1304 | static bool |

1305 | altered_reg_used (const_rtx x, bitmap alt) |

1306 | { |

1307 | subrtx_iterator::array_type array; |

1308 | FOR_EACH_SUBRTX (iter, array, x, NONCONST) |

1309 | { |

1310 | const_rtx x = *iter; |

1311 | if (REG_P (x) && REGNO_REG_SET_P (alt, REGNO (x))) |

1312 | return true; |

1313 | } |

1314 | return false; |

1315 | } |

1316 | |

1317 | /* Marks registers altered by EXPR in set ALT. */ |

1318 | |

1319 | static void |

1320 | mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt) |

1321 | { |

1322 | if (GET_CODE (expr) == SUBREG) |

1323 | expr = SUBREG_REG (expr); |

1324 | if (!REG_P (expr)) |

1325 | return; |

1326 | |

1327 | SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr)); |

1328 | } |

1329 | |

1330 | /* Checks whether RHS is simple enough to process. */ |

1331 | |

1332 | static bool |

1333 | simple_rhs_p (rtx rhs) |

1334 | { |

1335 | rtx op0, op1; |

1336 | |

1337 | if (function_invariant_p (rhs) |

1338 | || (REG_P (rhs) && !HARD_REGISTER_P (rhs))) |

1339 | return true; |

1340 | |

1341 | switch (GET_CODE (rhs)) |

1342 | { |

1343 | case PLUS: |

1344 | case MINUS: |

1345 | case AND: |

1346 | op0 = XEXP (rhs, 0); |

1347 | op1 = XEXP (rhs, 1); |

1348 | /* Allow reg OP const and reg OP reg. */ |

1349 | if (!(REG_P (op0) && !HARD_REGISTER_P (op0)) |

1350 | && !function_invariant_p (op0)) |

1351 | return false; |

1352 | if (!(REG_P (op1) && !HARD_REGISTER_P (op1)) |

1353 | && !function_invariant_p (op1)) |

1354 | return false; |

1355 | |

1356 | return true; |

1357 | |

1358 | case ASHIFT: |

1359 | case ASHIFTRT: |

1360 | case LSHIFTRT: |

1361 | case MULT: |

1362 | op0 = XEXP (rhs, 0); |

1363 | op1 = XEXP (rhs, 1); |

1364 | /* Allow reg OP const. */ |

1365 | if (!(REG_P (op0) && !HARD_REGISTER_P (op0))) |

1366 | return false; |

1367 | if (!function_invariant_p (op1)) |

1368 | return false; |

1369 | |

1370 | return true; |

1371 | |

1372 | default: |

1373 | return false; |

1374 | } |

1375 | } |

1376 | |

/* If REGNO has a single definition, return its known value, otherwise return
   null.  Chains of single-definition register copies are followed, and a
   function-invariant REG_EQUAL/REG_EQUIV note is preferred over the raw
   SET_SRC.  */

static rtx
find_single_def_src (unsigned int regno)
{
  df_ref adef;
  rtx set, src;

  for (;;)
    {
      rtx note;
      /* The register must have exactly one real (non-artificial)
	 definition.  */
      adef = DF_REG_DEF_CHAIN (regno);
      if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
	  || DF_REF_IS_ARTIFICIAL (adef))
	return NULL_RTX;

      /* That definition must be a plain single set of the register.  */
      set = single_set (DF_REF_INSN (adef));
      if (set == NULL || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno)
	return NULL_RTX;

      note = find_reg_equal_equiv_note (DF_REF_INSN (adef));

      if (note && function_invariant_p (XEXP (note, 0)))
	{
	  src = XEXP (note, 0);
	  break;
	}
      src = SET_SRC (set);

      /* A register-to-register copy: keep following the chain.  */
      if (REG_P (src))
	{
	  regno = REGNO (src);
	  continue;
	}
      break;
    }
  /* Only function invariants are safe to substitute everywhere.  */
  if (!function_invariant_p (src))
    return NULL_RTX;

  return src;
}

1420 | |

/* If any registers in *EXPR that have a single definition, try to replace
   them with the known-equivalent values.  The scan restarts after every
   successful substitution, both because the iterator is invalidated and
   because the substitution may expose further single-def registers.  */

static void
replace_single_def_regs (rtx *expr)
{
  subrtx_var_iterator::array_type array;
 repeat:
  FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)
    {
      rtx x = *iter;
      if (REG_P (x))
	if (rtx new_x = find_single_def_src (REGNO (x)))
	  {
	    /* *EXPR is rebuilt, so restart the walk from scratch.  */
	    *expr = simplify_replace_rtx (*expr, x, new_x);
	    goto repeat;
	  }
    }
}

1440 | |

1441 | /* A subroutine of simplify_using_initial_values, this function examines INSN |

1442 | to see if it contains a suitable set that we can use to make a replacement. |

1443 | If it is suitable, return true and set DEST and SRC to the lhs and rhs of |

1444 | the set; return false otherwise. */ |

1445 | |

1446 | static bool |

1447 | suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src) |

1448 | { |

1449 | rtx set = single_set (insn); |

1450 | rtx lhs = NULL_RTX, rhs; |

1451 | |

1452 | if (!set) |

1453 | return false; |

1454 | |

1455 | lhs = SET_DEST (set); |

1456 | if (!REG_P (lhs)) |

1457 | return false; |

1458 | |

1459 | rhs = find_reg_equal_equiv_note (insn); |

1460 | if (rhs) |

1461 | rhs = XEXP (rhs, 0); |

1462 | else |

1463 | rhs = SET_SRC (set); |

1464 | |

1465 | if (!simple_rhs_p (rhs)) |

1466 | return false; |

1467 | |

1468 | *dest = lhs; |

1469 | *src = rhs; |

1470 | return true; |

1471 | } |

1472 | |

1473 | /* Using the data returned by suitable_set_for_replacement, replace DEST |

1474 | with SRC in *EXPR and return the new expression. Also call |

1475 | replace_single_def_regs if the replacement changed something. */ |

1476 | static void |

1477 | replace_in_expr (rtx *expr, rtx dest, rtx src) |

1478 | { |

1479 | rtx old = *expr; |

1480 | *expr = simplify_replace_rtx (*expr, dest, src); |

1481 | if (old == *expr) |

1482 | return; |

1483 | replace_single_def_regs (expr); |

1484 | } |

1485 | |

1486 | /* Checks whether A implies B. */ |

1487 | |

1488 | static bool |

1489 | implies_p (rtx a, rtx b) |

1490 | { |

1491 | rtx op0, op1, opb0, opb1; |

1492 | machine_mode mode; |

1493 | |

1494 | if (rtx_equal_p (a, b)) |

1495 | return true; |

1496 | |

1497 | if (GET_CODE (a) == EQ) |

1498 | { |

1499 | op0 = XEXP (a, 0); |

1500 | op1 = XEXP (a, 1); |

1501 | |

1502 | if (REG_P (op0) |

1503 | || (GET_CODE (op0) == SUBREG |

1504 | && REG_P (SUBREG_REG (op0)))) |

1505 | { |

1506 | rtx r = simplify_replace_rtx (b, op0, op1); |

1507 | if (r == const_true_rtx) |

1508 | return true; |

1509 | } |

1510 | |

1511 | if (REG_P (op1) |

1512 | || (GET_CODE (op1) == SUBREG |

1513 | && REG_P (SUBREG_REG (op1)))) |

1514 | { |

1515 | rtx r = simplify_replace_rtx (b, op1, op0); |

1516 | if (r == const_true_rtx) |

1517 | return true; |

1518 | } |

1519 | } |

1520 | |

1521 | if (b == const_true_rtx) |

1522 | return true; |

1523 | |

1524 | if ((GET_RTX_CLASS (GET_CODE (a)) != RTX_COMM_COMPARE |

1525 | && GET_RTX_CLASS (GET_CODE (a)) != RTX_COMPARE) |

1526 | || (GET_RTX_CLASS (GET_CODE (b)) != RTX_COMM_COMPARE |

1527 | && GET_RTX_CLASS (GET_CODE (b)) != RTX_COMPARE)) |

1528 | return false; |

1529 | |

1530 | op0 = XEXP (a, 0); |

1531 | op1 = XEXP (a, 1); |

1532 | opb0 = XEXP (b, 0); |

1533 | opb1 = XEXP (b, 1); |

1534 | |

1535 | mode = GET_MODE (op0); |

1536 | if (mode != GET_MODE (opb0)) |

1537 | mode = VOIDmode; |

1538 | else if (mode == VOIDmode) |

1539 | { |

1540 | mode = GET_MODE (op1); |

1541 | if (mode != GET_MODE (opb1)) |

1542 | mode = VOIDmode; |

1543 | } |

1544 | |

1545 | /* A < B implies A + 1 <= B. */ |

1546 | if ((GET_CODE (a) == GT || GET_CODE (a) == LT) |

1547 | && (GET_CODE (b) == GE || GET_CODE (b) == LE)) |

1548 | { |

1549 | |

1550 | if (GET_CODE (a) == GT) |

1551 | std::swap (op0, op1); |

1552 | |

1553 | if (GET_CODE (b) == GE) |

1554 | std::swap (opb0, opb1); |

1555 | |

1556 | if (SCALAR_INT_MODE_P (mode) |

1557 | && rtx_equal_p (op1, opb1) |

1558 | && simplify_gen_binary (MINUS, mode, opb0, op0) == const1_rtx) |

1559 | return true; |

1560 | return false; |

1561 | } |

1562 | |

1563 | /* A < B or A > B imply A != B. TODO: Likewise |

1564 | A + n < B implies A != B + n if neither wraps. */ |

1565 | if (GET_CODE (b) == NE |

1566 | && (GET_CODE (a) == GT || GET_CODE (a) == GTU |

1567 | || GET_CODE (a) == LT || GET_CODE (a) == LTU)) |

1568 | { |

1569 | if (rtx_equal_p (op0, opb0) |

1570 | && rtx_equal_p (op1, opb1)) |

1571 | return true; |

1572 | } |

1573 | |

1574 | /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1. */ |

1575 | if (GET_CODE (a) == NE |

1576 | && op1 == const0_rtx) |

1577 | { |

1578 | if ((GET_CODE (b) == GTU |

1579 | && opb1 == const0_rtx) |

1580 | || (GET_CODE (b) == GEU |

1581 | && opb1 == const1_rtx)) |

1582 | return rtx_equal_p (op0, opb0); |

1583 | } |

1584 | |

1585 | /* A != N is equivalent to A - (N + 1) <u -1. */ |

1586 | if (GET_CODE (a) == NE |

1587 | && CONST_INT_P (op1) |

1588 | && GET_CODE (b) == LTU |

1589 | && opb1 == constm1_rtx |

1590 | && GET_CODE (opb0) == PLUS |

1591 | && CONST_INT_P (XEXP (opb0, 1)) |

1592 | /* Avoid overflows. */ |

1593 | && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1)) |

1594 | != ((unsigned HOST_WIDE_INT)1 |

1595 | << (HOST_BITS_PER_WIDE_INT - 1)) - 1) |

1596 | && INTVAL (XEXP (opb0, 1)) + 1 == -INTVAL (op1)) |

1597 | return rtx_equal_p (op0, XEXP (opb0, 0)); |

1598 | |

1599 | /* Likewise, A != N implies A - N > 0. */ |

1600 | if (GET_CODE (a) == NE |

1601 | && CONST_INT_P (op1)) |

1602 | { |

1603 | if (GET_CODE (b) == GTU |

1604 | && GET_CODE (opb0) == PLUS |

1605 | && opb1 == const0_rtx |

1606 | && CONST_INT_P (XEXP (opb0, 1)) |

1607 | /* Avoid overflows. */ |

1608 | && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1)) |

1609 | != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) |

1610 | && rtx_equal_p (XEXP (opb0, 0), op0)) |

1611 | return INTVAL (op1) == -INTVAL (XEXP (opb0, 1)); |

1612 | if (GET_CODE (b) == GEU |

1613 | && GET_CODE (opb0) == PLUS |

1614 | && opb1 == const1_rtx |

1615 | && CONST_INT_P (XEXP (opb0, 1)) |

1616 | /* Avoid overflows. */ |

1617 | && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1)) |

1618 | != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) |

1619 | && rtx_equal_p (XEXP (opb0, 0), op0)) |

1620 | return INTVAL (op1) == -INTVAL (XEXP (opb0, 1)); |

1621 | } |

1622 | |

1623 | /* A >s X, where X is positive, implies A <u Y, if Y is negative. */ |

1624 | if ((GET_CODE (a) == GT || GET_CODE (a) == GE) |

1625 | && CONST_INT_P (op1) |

1626 | && ((GET_CODE (a) == GT && op1 == constm1_rtx) |

1627 | || INTVAL (op1) >= 0) |

1628 | && GET_CODE (b) == LTU |

1629 | && CONST_INT_P (opb1) |

1630 | && rtx_equal_p (op0, opb0)) |

1631 | return INTVAL (opb1) < 0; |

1632 | |

1633 | return false; |

1634 | } |

1635 | |

/* Canonicalizes COND so that

   (1) Ensure that operands are ordered according to
     swap_commutative_operands_p.
   (2) (LE x const) will be replaced with (LT x <const+1>) and similarly
     for GE, GEU, and LEU.

   The returned condition is always built in SImode.  */

rtx
canon_condition (rtx cond)
{
  rtx op0, op1;
  enum rtx_code code;
  machine_mode mode;

  code = GET_CODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (swap_commutative_operands_p (op0, op1))
    {
      code = swap_condition (code);
      std::swap (op0, op1);
    }

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);
  gcc_assert (mode != VOIDmode);

  if (CONST_SCALAR_INT_P (op1) && GET_MODE_CLASS (mode) != MODE_CC)
    {
      rtx_mode_t const_val (op1, mode);

      switch (code)
	{
	case LE:
	  /* x <= C becomes x < C+1, unless C is the signed maximum and
	     C+1 would wrap.  */
	  if (wi::ne_p (const_val, wi::max_value (mode, SIGNED)))
	    {
	      code = LT;
	      op1 = immed_wide_int_const (wi::add (const_val, 1), mode);
	    }
	  break;

	case GE:
	  /* x >= C becomes x > C-1, unless C is the signed minimum.  */
	  if (wi::ne_p (const_val, wi::min_value (mode, SIGNED)))
	    {
	      code = GT;
	      op1 = immed_wide_int_const (wi::sub (const_val, 1), mode);
	    }
	  break;

	case LEU:
	  /* x <=u C becomes x <u C+1, unless C is the unsigned maximum
	     (all ones, i.e. -1).  */
	  if (wi::ne_p (const_val, -1))
	    {
	      code = LTU;
	      op1 = immed_wide_int_const (wi::add (const_val, 1), mode);
	    }
	  break;

	case GEU:
	  /* x >=u C becomes x >u C-1, unless C is zero.  */
	  if (wi::ne_p (const_val, 0))
	    {
	      code = GTU;
	      op1 = immed_wide_int_const (wi::sub (const_val, 1), mode);
	    }
	  break;

	default:
	  break;
	}
    }

  /* Rebuild the condition only if something changed.  */
  if (op0 != XEXP (cond, 0)
      || op1 != XEXP (cond, 1)
      || code != GET_CODE (cond)
      || GET_MODE (cond) != SImode)
    cond = gen_rtx_fmt_ee (code, SImode, op0, op1);

  return cond;
}

1716 | |

1717 | /* Reverses CONDition; returns NULL if we cannot. */ |

1718 | |

1719 | static rtx |

1720 | reversed_condition (rtx cond) |

1721 | { |

1722 | enum rtx_code reversed; |

1723 | reversed = reversed_comparison_code (cond, NULL); |

1724 | if (reversed == UNKNOWN) |

1725 | return NULL_RTX; |

1726 | else |

1727 | return gen_rtx_fmt_ee (reversed, |

1728 | GET_MODE (cond), XEXP (cond, 0), |

1729 | XEXP (cond, 1)); |

1730 | } |

1731 | |

/* Tries to use the fact that COND holds to simplify EXPR.  ALTERED is the
   set of altered regs.  On success *EXPR is replaced by const_true_rtx,
   const0_rtx, or a substituted expression; otherwise it is left
   unchanged.  */

void
simplify_using_condition (rtx cond, rtx *expr, regset altered)
{
  rtx rev, reve, exp = *expr;

  /* If some register gets altered later, we do not really speak about its
     value at the time of comparison.  */
  if (altered && altered_reg_used (cond, altered))
    return;

  /* For reg == const, just substitute the constant into *EXPR.  */
  if (GET_CODE (cond) == EQ
      && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
    {
      *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
      return;
    }

  if (!COMPARISON_P (exp))
    return;

  /* Compute negations before canonicalization, then canonicalize all
     four forms so that syntactic equality checks are meaningful.  */
  rev = reversed_condition (cond);
  reve = reversed_condition (exp);

  cond = canon_condition (cond);
  exp = canon_condition (exp);
  if (rev)
    rev = canon_condition (rev);
  if (reve)
    reve = canon_condition (reve);

  /* *EXPR is exactly the known condition -- trivially true.  */
  if (rtx_equal_p (exp, cond))
    {
      *expr = const_true_rtx;
      return;
    }

  /* *EXPR is exactly the negation of the known condition.  */
  if (rev && rtx_equal_p (exp, rev))
    {
      *expr = const0_rtx;
      return;
    }

  if (implies_p (cond, exp))
    {
      *expr = const_true_rtx;
      return;
    }

  if (reve && implies_p (cond, reve))
    {
      *expr = const0_rtx;
      return;
    }

  /* A proof by contradiction.  If *EXPR implies (not cond), *EXPR must
     be false.  */
  if (rev && implies_p (exp, rev))
    {
      *expr = const0_rtx;
      return;
    }

  /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true.  */
  if (rev && reve && implies_p (reve, rev))
    {
      *expr = const_true_rtx;
      return;
    }

  /* We would like to have some other tests here.  TODO.  */

  return;
}

1808 | |

1809 | /* Use relationship between A and *B to eventually eliminate *B. |

1810 | OP is the operation we consider. */ |

1811 | |

1812 | static void |

1813 | eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b) |

1814 | { |

1815 | switch (op) |

1816 | { |

1817 | case AND: |

1818 | /* If A implies *B, we may replace *B by true. */ |

1819 | if (implies_p (a, *b)) |

1820 | *b = const_true_rtx; |

1821 | break; |

1822 | |

1823 | case IOR: |

1824 | /* If *B implies A, we may replace *B by false. */ |

1825 | if (implies_p (*b, a)) |

1826 | *b = const0_rtx; |

1827 | break; |

1828 | |

1829 | default: |

1830 | gcc_unreachable (); |

1831 | } |

1832 | } |

1833 | |

1834 | /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the |

1835 | operation we consider. */ |

1836 | |

1837 | static void |

1838 | eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail) |

1839 | { |

1840 | rtx elt; |

1841 | |

1842 | for (elt = tail; elt; elt = XEXP (elt, 1)) |

1843 | eliminate_implied_condition (op, *head, &XEXP (elt, 0)); |

1844 | for (elt = tail; elt; elt = XEXP (elt, 1)) |

1845 | eliminate_implied_condition (op, XEXP (elt, 0), head); |

1846 | } |

1847 | |

/* Simplifies *EXPR using initial values at the start of the LOOP.  If *EXPR
   is a list, its elements are assumed to be combined using OP.

   Walks the single-predecessor chain of blocks ending at the loop
   preheader backwards, collecting known-true branch conditions and
   register assignments, and uses them to simplify *EXPR -- ideally down
   to a constant.  If *EXPR cannot be fully simplified, the last form
   that did not depend on any altered register is returned.  */

static void
simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
{
  bool expression_valid;
  rtx head, tail, last_valid_expr;
  rtx_expr_list *cond_list;
  rtx_insn *insn;
  rtx neutral, aggr;
  regset altered, this_altered;
  edge e;

  if (!*expr)
    return;

  if (CONSTANT_P (*expr))
    return;

  /* For a list, simplify the head, then recurse on the tail, using the
     neutral/absorbing elements of OP to prune early.  */
  if (GET_CODE (*expr) == EXPR_LIST)
    {
      head = XEXP (*expr, 0);
      tail = XEXP (*expr, 1);

      eliminate_implied_conditions (op, &head, tail);

      switch (op)
	{
	case AND:
	  neutral = const_true_rtx;
	  aggr = const0_rtx;
	  break;

	case IOR:
	  neutral = const0_rtx;
	  aggr = const_true_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}

      simplify_using_initial_values (loop, UNKNOWN, &head);
      if (head == aggr)
	{
	  /* The absorbing element decides the whole list.  */
	  XEXP (*expr, 0) = aggr;
	  XEXP (*expr, 1) = NULL_RTX;
	  return;
	}
      else if (head == neutral)
	{
	  /* The neutral element can simply be dropped.  */
	  *expr = tail;
	  simplify_using_initial_values (loop, op, expr);
	  return;
	}
      simplify_using_initial_values (loop, op, &tail);

      if (tail && XEXP (tail, 0) == aggr)
	{
	  *expr = tail;
	  return;
	}

      XEXP (*expr, 0) = head;
      XEXP (*expr, 1) = tail;
      return;
    }

  gcc_assert (op == UNKNOWN);

  replace_single_def_regs (expr);
  if (CONSTANT_P (*expr))
    return;

  e = loop_preheader_edge (loop);
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return;

  altered = ALLOC_REG_SET (&reg_obstack);
  this_altered = ALLOC_REG_SET (&reg_obstack);

  expression_valid = true;
  last_valid_expr = *expr;
  cond_list = NULL;
  /* Walk backwards through the chain of single-predecessor blocks that
     dominate the loop entry.  */
  while (1)
    {
      insn = BB_END (e->src);
      if (any_condjump_p (insn))
	{
	  rtx cond = get_condition (BB_END (e->src), NULL, false, true);

	  /* If we reached this block through the fall-through edge, the
	     branch was NOT taken, so the condition holds negated.  */
	  if (cond && (e->flags & EDGE_FALLTHRU))
	    cond = reversed_condition (cond);
	  if (cond)
	    {
	      rtx old = *expr;
	      simplify_using_condition (cond, expr, altered);
	      if (old != *expr)
		{
		  rtx note;
		  if (CONSTANT_P (*expr))
		    goto out;
		  /* Retry all previously gathered conditions on the
		     changed expression.  */
		  for (note = cond_list; note; note = XEXP (note, 1))
		    {
		      simplify_using_condition (XEXP (note, 0), expr, altered);
		      if (CONSTANT_P (*expr))
			goto out;
		    }
		}
	      cond_list = alloc_EXPR_LIST (0, cond, cond_list);
	    }
	}

      FOR_BB_INSNS_REVERSE (e->src, insn)
	{
	  rtx src, dest;
	  rtx old = *expr;

	  if (!INSN_P (insn))
	    continue;

	  CLEAR_REG_SET (this_altered);
	  note_stores (PATTERN (insn), mark_altered, this_altered);
	  if (CALL_P (insn))
	    {
	      /* Kill all call clobbered registers.  */
	      unsigned int i;
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, i, hrsi)
		SET_REGNO_REG_SET (this_altered, i);
	    }

	  if (suitable_set_for_replacement (insn, &dest, &src))
	    {
	      rtx_expr_list **pnote, **pnote_next;

	      replace_in_expr (expr, dest, src);
	      if (CONSTANT_P (*expr))
		goto out;

	      /* Propagate the replacement into the gathered conditions,
		 unlinking any that collapse to a constant.  PNOTE_NEXT
		 is reset to PNOTE on deletion so the loop re-examines
		 the slot the next node slid into.  */
	      for (pnote = &cond_list; *pnote; pnote = pnote_next)
		{
		  rtx_expr_list *note = *pnote;
		  rtx old_cond = XEXP (note, 0);

		  pnote_next = (rtx_expr_list **)&XEXP (note, 1);
		  replace_in_expr (&XEXP (note, 0), dest, src);

		  /* We can no longer use a condition that has been simplified
		     to a constant, and simplify_using_condition will abort if
		     we try.  */
		  if (CONSTANT_P (XEXP (note, 0)))
		    {
		      *pnote = *pnote_next;
		      pnote_next = pnote;
		      free_EXPR_LIST_node (note);
		    }
		  /* Retry simplifications with this condition if either the
		     expression or the condition changed.  */
		  else if (old_cond != XEXP (note, 0) || old != *expr)
		    simplify_using_condition (XEXP (note, 0), expr, altered);
		}
	    }
	  else
	    {
	      rtx_expr_list **pnote, **pnote_next;

	      /* If we did not use this insn to make a replacement, any overlap
		 between stores in this insn and our expression will cause the
		 expression to become invalid.  */
	      if (altered_reg_used (*expr, this_altered))
		goto out;

	      /* Likewise for the conditions.  */
	      for (pnote = &cond_list; *pnote; pnote = pnote_next)
		{
		  rtx_expr_list *note = *pnote;
		  rtx old_cond = XEXP (note, 0);

		  pnote_next = (rtx_expr_list **)&XEXP (note, 1);
		  if (altered_reg_used (old_cond, this_altered))
		    {
		      *pnote = *pnote_next;
		      pnote_next = pnote;
		      free_EXPR_LIST_node (note);
		    }
		}
	    }

	  if (CONSTANT_P (*expr))
	    goto out;

	  IOR_REG_SET (altered, this_altered);

	  /* If the expression now contains regs that have been altered, we
	     can't return it to the caller.  However, it is still valid for
	     further simplification, so keep searching to see if we can
	     eventually turn it into a constant.  */
	  if (altered_reg_used (*expr, altered))
	    expression_valid = false;
	  if (expression_valid)
	    last_valid_expr = *expr;
	}

      if (!single_pred_p (e->src)
	  || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	break;
      e = single_pred_edge (e->src);
    }

 out:
  free_EXPR_LIST_list (&cond_list);
  if (!CONSTANT_P (*expr))
    *expr = last_valid_expr;
  FREE_REG_SET (altered);
  FREE_REG_SET (this_altered);
}

2067 | |

/* Transforms invariant IV into MODE.  Adds assumptions based on the fact
   that IV occurs as left operands of comparison COND and its signedness
   is SIGNED_P to DESC.  */

static void
shorten_into_mode (struct rtx_iv *iv, scalar_int_mode mode,
		   enum rtx_code cond, bool signed_p, struct niter_desc *desc)
{
  rtx mmin, mmax, cond_over, cond_under;

  /* The narrowed value only differs from the wide one when the base is
     below the representable minimum or above the maximum of MODE.  */
  get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax);
  cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode,
					iv->base, mmin);
  cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode,
				       iv->base, mmax);

  switch (cond)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
      /* With a "<" exit test, wrapping below the minimum may make the
	 loop infinite; starting above the maximum means the loop body
	 may never execute.  */
      if (cond_under != const0_rtx)
	desc->infinite =
		alloc_EXPR_LIST (0, cond_under, desc->infinite);
      if (cond_over != const0_rtx)
	desc->noloop_assumptions =
		alloc_EXPR_LIST (0, cond_over, desc->noloop_assumptions);
      break;

    case GE:
    case GT:
    case GEU:
    case GTU:
      /* Mirror image of the above for ">" exit tests.  */
      if (cond_over != const0_rtx)
	desc->infinite =
		alloc_EXPR_LIST (0, cond_over, desc->infinite);
      if (cond_under != const0_rtx)
	desc->noloop_assumptions =
		alloc_EXPR_LIST (0, cond_under, desc->noloop_assumptions);
      break;

    case NE:
      /* For a "!=" exit test, wrapping in either direction may cause the
	 test to be skipped forever.  */
      if (cond_over != const0_rtx)
	desc->infinite =
		alloc_EXPR_LIST (0, cond_over, desc->infinite);
      if (cond_under != const0_rtx)
	desc->infinite =
		alloc_EXPR_LIST (0, cond_under, desc->infinite);
      break;

    default:
      gcc_unreachable ();
    }

  iv->mode = mode;
  iv->extend = signed_p ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
}

2126 | |

/* Transforms IV0 and IV1 compared by COND so that they are both compared as
   subregs of the same mode if possible (sometimes it is necessary to add
   some assumptions to DESC).  Returns true on success; on failure the ivs
   may have been partially modified but DESC->simple_p handling in the
   caller discards the result.  */

static bool
canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
			 enum rtx_code cond, struct niter_desc *desc)
{
  scalar_int_mode comp_mode;
  bool signed_p;

  /* If the ivs behave specially in the first iteration, or are
     added/multiplied after extending, we ignore them.  */
  if (iv0->first_special || iv0->mult != const1_rtx || iv0->delta != const0_rtx)
    return false;
  if (iv1->first_special || iv1->mult != const1_rtx || iv1->delta != const0_rtx)
    return false;

  /* If there is some extend, it must match signedness of the comparison.  */
  switch (cond)
    {
    case LE:
    case LT:
      /* Signed comparison: a zero extend on either operand is
	 incompatible.  */
      if (iv0->extend == IV_ZERO_EXTEND
	  || iv1->extend == IV_ZERO_EXTEND)
	return false;
      signed_p = true;
      break;

    case LEU:
    case LTU:
      /* Unsigned comparison: a sign extend on either operand is
	 incompatible.  */
      if (iv0->extend == IV_SIGN_EXTEND
	  || iv1->extend == IV_SIGN_EXTEND)
	return false;
      signed_p = false;
      break;

    case NE:
      /* NE works for either signedness, but both extends (when known)
	 must agree; the known one, if any, decides SIGNED_P.  */
      if (iv0->extend != IV_UNKNOWN_EXTEND
	  && iv1->extend != IV_UNKNOWN_EXTEND
	  && iv0->extend != iv1->extend)
	return false;

      signed_p = false;
      if (iv0->extend != IV_UNKNOWN_EXTEND)
	signed_p = iv0->extend == IV_SIGN_EXTEND;
      if (iv1->extend != IV_UNKNOWN_EXTEND)
	signed_p = iv1->extend == IV_SIGN_EXTEND;
      break;

    default:
      gcc_unreachable ();
    }

  /* Values of both variables should be computed in the same mode.  These
     might indeed be different, if we have comparison like

     (compare (subreg:SI (iv0)) (subreg:SI (iv1)))

     and iv0 and iv1 are both ivs iterating in SI mode, but calculated
     in different modes.  This does not seem impossible to handle, but
     it hardly ever occurs in practice.

     The only exception is the case when one of operands is invariant.
     For example pentium 3 generates comparisons like
     (lt (subreg:HI (reg:SI)) 100).  Here we assign HImode to 100, but we
     definitely do not want this prevent the optimization.  */
  /* Pick the wider of the two extend modes as the common computation
     mode.  */
  comp_mode = iv0->extend_mode;
  if (GET_MODE_BITSIZE (comp_mode) < GET_MODE_BITSIZE (iv1->extend_mode))
    comp_mode = iv1->extend_mode;

  /* Only an invariant (step 0, not yet narrowed to a subreg) may be
     widened into COMP_MODE by extending its base.  */
  if (iv0->extend_mode != comp_mode)
    {
      if (iv0->mode != iv0->extend_mode
	  || iv0->step != const0_rtx)
	return false;

      iv0->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND,
				      comp_mode, iv0->base, iv0->mode);
      iv0->extend_mode = comp_mode;
    }

  if (iv1->extend_mode != comp_mode)
    {
      if (iv1->mode != iv1->extend_mode
	  || iv1->step != const0_rtx)
	return false;

      iv1->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND,
				      comp_mode, iv1->base, iv1->mode);
      iv1->extend_mode = comp_mode;
    }

  /* Check that both ivs belong to a range of a single mode.  If one of the
     operands is an invariant, we may need to shorten it into the common
     mode.  */
  if (iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && iv0->mode != iv1->mode)
    shorten_into_mode (iv0, iv1->mode, cond, signed_p, desc);

  if (iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && iv0->mode != iv1->mode)
    /* IV1 is the right operand, hence the swapped condition.  */
    shorten_into_mode (iv1, iv0->mode, swap_condition (cond), signed_p, desc);

  if (iv0->mode != iv1->mode)
    return false;

  desc->mode = iv0->mode;
  desc->signed_p = signed_p;

  return true;
}

2241 | |

2242 | /* Tries to estimate the maximum number of iterations in LOOP, and return the |

2243 | result. This function is called from iv_number_of_iterations with |

2244 | a number of fields in DESC already filled in. OLD_NITER is the original |

2245 | expression for the number of iterations, before we tried to simplify it. */ |

2246 | |

2247 | static uint64_t |

2248 | determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter) |

2249 | { |

2250 | rtx niter = desc->niter_expr; |

2251 | rtx mmin, mmax, cmp; |

2252 | uint64_t nmax, inc; |

2253 | uint64_t andmax = 0; |

2254 | |

2255 | /* We used to look for constant operand 0 of AND, |

2256 | but canonicalization should always make this impossible. */ |

2257 | gcc_checking_assert (GET_CODE (niter) != AND |

2258 | || !CONST_INT_P (XEXP (niter, 0))); |

2259 | |

2260 | if (GET_CODE (niter) == AND |

2261 | && CONST_INT_P (XEXP (niter, 1))) |

2262 | { |

2263 | andmax = UINTVAL (XEXP (niter, 1)); |

2264 | niter = XEXP (niter, 0); |

2265 | } |

2266 | |

2267 | get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax); |

2268 | nmax = UINTVAL (mmax) - UINTVAL (mmin); |

2269 | |

2270 | if (GET_CODE (niter) == UDIV) |

2271 | { |

2272 | if (!CONST_INT_P (XEXP (niter, 1))) |

2273 | return nmax; |

2274 | inc = INTVAL (XEXP (niter, 1)); |

2275 | niter = XEXP (niter, 0); |

2276 | } |

2277 | else |

2278 | inc = 1; |

2279 | |

2280 | /* We could use a binary search here, but for now improving the upper |

2281 | bound by just one eliminates one important corner case. */ |

2282 | cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode, |

2283 | desc->mode, old_niter, mmax); |

2284 | simplify_using_initial_values (loop, UNKNOWN, &cmp); |

2285 | if (cmp == const_true_rtx) |

2286 | { |

2287 | nmax--; |

2288 | |

2289 | if (dump_file) |

2290 | fprintf (dump_file, ";; improved upper bound by one.\n"); |

2291 | } |

2292 | nmax /= inc; |

2293 | if (andmax) |

2294 | nmax = MIN (nmax, andmax); |

2295 | if (dump_file) |

2296 | fprintf (dump_file, ";; Determined upper bound %"PRId64 ".\n", |

2297 | nmax); |

2298 | return nmax; |

2299 | } |

2300 | |

/* Computes number of iterations of the CONDITION in INSN in LOOP and stores
   the result into DESC.  Very similar to determine_number_of_iterations
   (basically its rtl version), complicated by things like subregs.  */

static void
iv_number_of_iterations (struct loop *loop, rtx_insn *insn, rtx condition,
			 struct niter_desc *desc)
{
  rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
  struct rtx_iv iv0, iv1;
  rtx assumption, may_not_xform;
  enum rtx_code cond;
  machine_mode nonvoid_mode;
  scalar_int_mode comp_mode;
  rtx mmin, mmax, mode_mmin, mode_mmax;
  uint64_t s, size, d, inv, max, up, down;
  int64_t inc, step_val;
  int was_sharp = false;
  rtx old_niter;
  bool step_is_pow2;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if noloop_assumptions then the loop does not roll
     if infinite then this exit is never used */

  desc->assumptions = NULL_RTX;
  desc->noloop_assumptions = NULL_RTX;
  desc->infinite = NULL_RTX;
  desc->simple_p = true;

  desc->const_iter = false;
  desc->niter_expr = NULL_RTX;

  cond = GET_CODE (condition);
  gcc_assert (COMPARISON_P (condition));

  nonvoid_mode = GET_MODE (XEXP (condition, 0));
  if (nonvoid_mode == VOIDmode)
    nonvoid_mode = GET_MODE (XEXP (condition, 1));
  /* The constant comparisons should be folded.  */
  gcc_assert (nonvoid_mode != VOIDmode);

  /* We only handle integers or pointers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (nonvoid_mode, &mode))
    goto fail;

  /* Describe both comparison operands as induction variables.  */
  op0 = XEXP (condition, 0);
  if (!iv_analyze (insn, mode, op0, &iv0))
    goto fail;

  op1 = XEXP (condition, 1);
  if (!iv_analyze (insn, mode, op1, &iv1))
    goto fail;

  /* The arithmetic below is done in host integers; wider modes would
     not fit.  */
  if (GET_MODE_BITSIZE (iv0.extend_mode) > HOST_BITS_PER_WIDE_INT
      || GET_MODE_BITSIZE (iv1.extend_mode) > HOST_BITS_PER_WIDE_INT)
    goto fail;

  /* Check condition and normalize it.  */

  switch (cond)
    {
    case GE:
    case GT:
    case GEU:
    case GTU:
      /* Canonicalize so that only NE, LE, LEU, LT and LTU remain.  */
      std::swap (iv0, iv1);
      cond = swap_condition (cond);
      break;
    case NE:
    case LE:
    case LEU:
    case LT:
    case LTU:
      break;
    default:
      goto fail;
    }

  /* Handle extends.  This is relatively nontrivial, so we only try in some
     easy cases, when we can canonicalize the ivs (possibly by adding some
     assumptions) to shape subreg (base + i * step).  This function also fills
     in desc->mode and desc->signed_p.  */

  if (!canonicalize_iv_subregs (&iv0, &iv1, cond, desc))
    goto fail;

  /* MODE is now the common iteration mode, COMP_MODE the (possibly wider)
     mode in which bases and steps are computed.  */
  comp_mode = iv0.extend_mode;
  mode = iv0.mode;
  size = GET_MODE_PRECISION (mode);
  get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax);
  mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
  mode_mmax = lowpart_subreg (mode, mmax, comp_mode);

  if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
    goto fail;

  /* We can take care of the case of two induction variables chasing each other
     if the test is NE.  I have never seen a loop using it, but still it is
     cool.  */
  if (iv0.step != const0_rtx && iv1.step != const0_rtx)
    {
      if (cond != NE)
	goto fail;

      /* Fold the two steps into IV0; only the difference matters for NE.  */
      iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step);
      iv1.step = const0_rtx;
    }

  iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
  iv1.step = lowpart_subreg (mode, iv1.step, comp_mode);

  /* This is either infinite loop or the one that ends immediately, depending
     on initial values.  Unswitching should remove this kind of conditions.  */
  if (iv0.step == const0_rtx && iv1.step == const0_rtx)
    goto fail;

  if (cond != NE)
    {
      /* Exactly one of the steps is nonzero here; STEP_VAL is the signed
	 rate at which the moving side approaches the invariant one.  */
      if (iv0.step == const0_rtx)
	step_val = -INTVAL (iv1.step);
      else
	step_val = INTVAL (iv0.step);

      /* Ignore loops of while (i-- < 10) type.  */
      if (step_val < 0)
	goto fail;

      step_is_pow2 = !(step_val & (step_val - 1));
    }
  else
    {
      /* We do not care about whether the step is power of two in this
	 case.  */
      step_is_pow2 = false;
      step_val = 0;
    }

  /* Some more condition normalization.  We must record some assumptions
     due to overflows.  */
  switch (cond)
    {
    case LT:
    case LTU:
      /* We want to take care only of non-sharp relationals; this is easy,
	 as in cases the overflow would make the transformation unsafe
	 the loop does not roll.  Seemingly it would make more sense to want
	 to take care of sharp relationals instead, as NE is more similar to
	 them, but the problem is that here the transformation would be more
	 difficult due to possibly infinite loops.  */
      if (iv0.step == const0_rtx)
	{
	  /* iv0 invariant: i < b becomes i + 1 <= b, unless i == MAX.  */
	  tmp = lowpart_subreg (mode, iv0.base, comp_mode);
	  assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
						mode_mmax);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  iv0.base = simplify_gen_binary (PLUS, comp_mode,
					  iv0.base, const1_rtx);
	}
      else
	{
	  /* Moving iv0: i < b becomes i <= b - 1, unless b == MIN.  */
	  tmp = lowpart_subreg (mode, iv1.base, comp_mode);
	  assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
						mode_mmin);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  iv1.base = simplify_gen_binary (PLUS, comp_mode,
					  iv1.base, constm1_rtx);
	}

      if (assumption != const0_rtx)
	desc->noloop_assumptions =
	  alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
      cond = (cond == LT) ? LE : LEU;

      /* It will be useful to be able to tell the difference once more in
	 LE -> NE reduction.  */
      was_sharp = true;
      break;
    default: ;
    }

  /* Take care of trivially infinite loops.  */
  if (cond != NE)
    {
      if (iv0.step == const0_rtx)
	{
	  /* An invariant left operand equal to the mode minimum satisfies
	     "a <= x" for every x.  */
	  tmp = lowpart_subreg (mode, iv0.base, comp_mode);
	  if (rtx_equal_p (tmp, mode_mmin))
	    {
	      desc->infinite =
		alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
	      /* Fill in the remaining fields somehow.  */
	      goto zero_iter_simplify;
	    }
	}
      else
	{
	  /* Symmetrically, an invariant right operand equal to the mode
	     maximum can never be exceeded.  */
	  tmp = lowpart_subreg (mode, iv1.base, comp_mode);
	  if (rtx_equal_p (tmp, mode_mmax))
	    {
	      desc->infinite =
		alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
	      /* Fill in the remaining fields somehow.  */
	      goto zero_iter_simplify;
	    }
	}
    }

  /* If we can we want to take care of NE conditions instead of size
     comparisons, as they are much more friendly (most importantly
     this takes care of special handling of loops with step 1).  We can
     do it if we first check that upper bound is greater or equal to
     lower bound, their difference is constant c modulo step and that
     there is not an overflow.  */
  if (cond != NE)
    {
      if (iv0.step == const0_rtx)
	step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode);
      else
	step = iv0.step;
      step = lowpart_subreg (mode, step, comp_mode);
      delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
      delta = lowpart_subreg (mode, delta, comp_mode);
      delta = simplify_gen_binary (UMOD, mode, delta, step);
      may_xform = const0_rtx;
      may_not_xform = const_true_rtx;

      if (CONST_INT_P (delta))
	{
	  if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
	    {
	      /* A special case.  We have transformed condition of type
		 for (i = 0; i < 4; i += 4)
		 into
		 for (i = 0; i <= 3; i += 4)
		 obviously if the test for overflow during that transformation
		 passed, we cannot overflow here.  Most importantly any
		 loop with sharp end condition and step 1 falls into this
		 category, so handling this case specially is definitely
		 worth the troubles.  */
	      may_xform = const_true_rtx;
	    }
	  else if (iv0.step == const0_rtx)
	    {
	      /* The final value of the moving right operand must not wrap
		 below the mode minimum.  */
	      bound = simplify_gen_binary (PLUS, comp_mode, mmin, step);
	      bound = simplify_gen_binary (MINUS, comp_mode, bound, delta);
	      bound = lowpart_subreg (mode, bound, comp_mode);
	      tmp = lowpart_subreg (mode, iv0.base, comp_mode);
	      may_xform = simplify_gen_relational (cond, SImode, mode,
						   bound, tmp);
	      may_not_xform = simplify_gen_relational (reverse_condition (cond),
						       SImode, mode,
						       bound, tmp);
	    }
	  else
	    {
	      /* The final value of the moving left operand must not wrap
		 above the mode maximum.  */
	      bound = simplify_gen_binary (MINUS, comp_mode, mmax, step);
	      bound = simplify_gen_binary (PLUS, comp_mode, bound, delta);
	      bound = lowpart_subreg (mode, bound, comp_mode);
	      tmp = lowpart_subreg (mode, iv1.base, comp_mode);
	      may_xform = simplify_gen_relational (cond, SImode, mode,
						   tmp, bound);
	      may_not_xform = simplify_gen_relational (reverse_condition (cond),
						       SImode, mode,
						       tmp, bound);
	    }
	}

      if (may_xform != const0_rtx)
	{
	  /* We perform the transformation always provided that it is not
	     completely senseless.  This is OK, as we would need this assumption
	     to determine the number of iterations anyway.  */
	  if (may_xform != const_true_rtx)
	    {
	      /* If the step is a power of two and the final value we have
		 computed overflows, the cycle is infinite.  Otherwise it
		 is nontrivial to compute the number of iterations.  */
	      if (step_is_pow2)
		desc->infinite = alloc_EXPR_LIST (0, may_not_xform,
						  desc->infinite);
	      else
		desc->assumptions = alloc_EXPR_LIST (0, may_xform,
						     desc->assumptions);
	    }

	  /* We are going to lose some information about upper bound on
	     number of iterations in this step, so record the information
	     here.  */
	  inc = INTVAL (iv0.step) - INTVAL (iv1.step);
	  if (CONST_INT_P (iv1.base))
	    up = INTVAL (iv1.base);
	  else
	    up = INTVAL (mode_mmax) - inc;
	  down = INTVAL (CONST_INT_P (iv0.base)
			 ? iv0.base
			 : mode_mmin);
	  max = (up - down) / inc + 1;
	  if (!desc->infinite
	      && !desc->assumptions)
	    record_niter_bound (loop, max, false, true);

	  /* Shift the invariant bound so that the condition can become
	     an equality test.  */
	  if (iv0.step == const0_rtx)
	    {
	      iv0.base = simplify_gen_binary (PLUS, comp_mode, iv0.base, delta);
	      iv0.base = simplify_gen_binary (MINUS, comp_mode, iv0.base, step);
	    }
	  else
	    {
	      iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, delta);
	      iv1.base = simplify_gen_binary (PLUS, comp_mode, iv1.base, step);
	    }

	  tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
	  tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
	  assumption = simplify_gen_relational (reverse_condition (cond),
						SImode, mode, tmp0, tmp1);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  else if (assumption != const0_rtx)
	    desc->noloop_assumptions =
	      alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
	  cond = NE;
	}
    }

  /* Count the number of iterations.  */
  if (cond == NE)
    {
      /* Everything we do here is just arithmetics modulo size of mode.  This
	 makes us able to do more involved computations of number of iterations
	 than in other cases.  First transform the condition into shape
	 s * i <> c, with s positive.  */
      iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
      iv0.base = const0_rtx;
      iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step);
      iv1.step = const0_rtx;
      if (INTVAL (iv0.step) < 0)
	{
	  iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode);
	  iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode);
	}
      iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);

      /* Let nsd (s, size of mode) = d.  If d does not divide c, the loop
	 is infinite.  Otherwise, the number of iterations is
	 (inverse(s/d) * (c/d)) mod (size of mode/d).  */
      s = INTVAL (iv0.step); d = 1;
      while (s % 2 != 1)
	{
	  s /= 2;
	  d *= 2;
	  size--;
	}
      /* BOUND is the mask for arithmetic modulo 2^size, built without
	 shifting by the full host word width.  */
      bound = GEN_INT (((uint64_t) 1 << (size - 1 ) << 1) - 1);

      tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
      tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
      assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
      desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);

      tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
      inv = inverse (s, size);
      tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode));
      desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound);
    }
  else
    {
      if (iv1.step == const0_rtx)
	/* Condition in shape a + s * i <= b
	   We must know that b + s does not overflow and a <= b + s and then we
	   can compute number of iterations as (b + s - a) / s.  (It might
	   seem that we in fact could be more clever about testing the b + s
	   overflow condition using some information about b - a mod s,
	   but it was already taken into account during LE -> NE transform).  */
	{
	  step = iv0.step;
	  tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
	  tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);

	  bound = simplify_gen_binary (MINUS, mode, mode_mmax,
				       lowpart_subreg (mode, step,
						       comp_mode));
	  if (step_is_pow2)
	    {
	      rtx t0, t1;

	      /* If s is power of 2, we know that the loop is infinite if
		 a % s <= b % s and b + s overflows.  */
	      assumption = simplify_gen_relational (reverse_condition (cond),
						    SImode, mode,
						    tmp1, bound);

	      t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
	      t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
	      tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
	      assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
	      desc->infinite =
		alloc_EXPR_LIST (0, assumption, desc->infinite);
	    }
	  else
	    {
	      assumption = simplify_gen_relational (cond, SImode, mode,
						    tmp1, bound);
	      desc->assumptions =
		alloc_EXPR_LIST (0, assumption, desc->assumptions);
	    }

	  /* The loop rolls zero times iff a > b + s.  */
	  tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step);
	  tmp = lowpart_subreg (mode, tmp, comp_mode);
	  assumption = simplify_gen_relational (reverse_condition (cond),
						SImode, mode, tmp0, tmp);

	  delta = simplify_gen_binary (PLUS, mode, tmp1, step);
	  delta = simplify_gen_binary (MINUS, mode, delta, tmp0);
	}
      else
	{
	  /* Condition in shape a <= b - s * i
	     We must know that a - s does not overflow and a - s <= b and then
	     we can again compute number of iterations as (b - (a - s)) / s.  */
	  step = simplify_gen_unary (NEG, mode, iv1.step, mode);
	  tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
	  tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);

	  bound = simplify_gen_binary (PLUS, mode, mode_mmin,
				       lowpart_subreg (mode, step, comp_mode));
	  if (step_is_pow2)
	    {
	      rtx t0, t1;

	      /* If s is power of 2, we know that the loop is infinite if
		 a % s <= b % s and a - s overflows.  */
	      assumption = simplify_gen_relational (reverse_condition (cond),
						    SImode, mode,
						    bound, tmp0);

	      t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
	      t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
	      tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
	      assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
	      desc->infinite =
		alloc_EXPR_LIST (0, assumption, desc->infinite);
	    }
	  else
	    {
	      assumption = simplify_gen_relational (cond, SImode, mode,
						    bound, tmp0);
	      desc->assumptions =
		alloc_EXPR_LIST (0, assumption, desc->assumptions);
	    }

	  /* The loop rolls zero times iff a - s > b.  */
	  tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step);
	  tmp = lowpart_subreg (mode, tmp, comp_mode);
	  assumption = simplify_gen_relational (reverse_condition (cond),
						SImode, mode,
						tmp, tmp1);
	  delta = simplify_gen_binary (MINUS, mode, tmp0, step);
	  delta = simplify_gen_binary (MINUS, mode, tmp1, delta);
	}
      if (assumption == const_true_rtx)
	goto zero_iter_simplify;
      else if (assumption != const0_rtx)
	desc->noloop_assumptions =
	  alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
      delta = simplify_gen_binary (UDIV, mode, delta, step);
      desc->niter_expr = delta;
    }

  old_niter = desc->niter_expr;

  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions);
  simplify_using_initial_values (loop, IOR, &desc->infinite);
  simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr);

  /* Rerun the simplification.  Consider code (created by copying loop headers)

     i = 0;

     if (0 < n)
       {
	 do
	   {
	     i++;
	   } while (i < n);
       }

     The first pass determines that i = 0, the second pass uses it to eliminate
     noloop assumption.  */

  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions);
  simplify_using_initial_values (loop, IOR, &desc->infinite);
  simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr);

  if (desc->noloop_assumptions
      && XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
    goto zero_iter;

  if (CONST_INT_P (desc->niter_expr))
    {
      uint64_t val = INTVAL (desc->niter_expr);

      desc->const_iter = true;
      desc->niter = val & GET_MODE_MASK (desc->mode);
      if (!desc->infinite
	  && !desc->assumptions)
	record_niter_bound (loop, desc->niter, false, true);
    }
  else
    {
      max = determine_max_iter (loop, desc, old_niter);
      if (!max)
	goto zero_iter_simplify;
      if (!desc->infinite
	  && !desc->assumptions)
	record_niter_bound (loop, max, false, true);

      /* simplify_using_initial_values does a copy propagation on the registers
	 in the expression for the number of iterations.  This prolongs life
	 ranges of registers and increases register pressure, and usually
	 brings no gain (and if it happens to do, the cse pass will take care
	 of it anyway).  So prevent this behavior, unless it enabled us to
	 derive that the number of iterations is a constant.  */
      desc->niter_expr = old_niter;
    }

  return;

zero_iter_simplify:
  /* Simplify the assumptions.  */
  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->infinite);

  /* Fallthru.  */
zero_iter:
  desc->const_iter = true;
  desc->niter = 0;
  record_niter_bound (loop, 0, true, true);
  desc->noloop_assumptions = NULL_RTX;
  desc->niter_expr = const0_rtx;
  return;

fail:
  desc->simple_p = false;
  return;
}

2863 | |

2864 | /* Checks whether E is a simple exit from LOOP and stores its description |

2865 | into DESC. */ |

2866 | |

2867 | static void |

2868 | check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) |

2869 | { |

2870 | basic_block exit_bb; |

2871 | rtx condition; |

2872 | rtx_insn *at; |

2873 | edge ein; |

2874 | |

2875 | exit_bb = e->src; |

2876 | desc->simple_p = false; |

2877 | |

2878 | /* It must belong directly to the loop. */ |

2879 | if (exit_bb->loop_father != loop) |

2880 | return; |

2881 | |

2882 | /* It must be tested (at least) once during any iteration. */ |

2883 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb)) |

2884 | return; |

2885 | |

2886 | /* It must end in a simple conditional jump. */ |

2887 | if (!any_condjump_p (BB_END (exit_bb))) |

2888 | return; |

2889 | |

2890 | ein = EDGE_SUCC (exit_bb, 0); |

2891 | if (ein == e) |

2892 | ein = EDGE_SUCC (exit_bb, 1); |

2893 | |

2894 | desc->out_edge = e; |

2895 | desc->in_edge = ein; |

2896 | |

2897 | /* Test whether the condition is suitable. */ |

2898 | if (!(condition = get_condition (BB_END (ein->src), &at, false, false))) |

2899 | return; |

2900 | |

2901 | if (ein->flags & EDGE_FALLTHRU) |

2902 | { |

2903 | condition = reversed_condition (condition); |

2904 | if (!condition) |

2905 | return; |

2906 | } |

2907 | |

2908 | /* Check that we are able to determine number of iterations and fill |

2909 | in information about it. */ |

2910 | iv_number_of_iterations (loop, at, condition, desc); |

2911 | } |

2912 | |

2913 | /* Finds a simple exit of LOOP and stores its description into DESC. */ |

2914 | |

2915 | void |

2916 | find_simple_exit (struct loop *loop, struct niter_desc *desc) |

2917 | { |

2918 | unsigned i; |

2919 | basic_block *body; |

2920 | edge e; |

2921 | struct niter_desc act; |

2922 | bool any = false; |

2923 | edge_iterator ei; |

2924 | |

2925 | desc->simple_p = false; |

2926 | body = get_loop_body (loop); |

2927 | |

2928 | for (i = 0; i < loop->num_nodes; i++) |

2929 | { |

2930 | FOR_EACH_EDGE (e, ei, body[i]->succs) |

2931 | { |

2932 | if (flow_bb_inside_loop_p (loop, e->dest)) |

2933 | continue; |

2934 | |

2935 | check_simple_exit (loop, e, &act); |

2936 | if (!act.simple_p) |

2937 | continue; |

2938 | |

2939 | if (!any) |

2940 | any = true; |

2941 | else |

2942 | { |

2943 | /* Prefer constant iterations; the less the better. */ |

2944 | if (!act.const_iter |

2945 | || (desc->const_iter && act.niter >= desc->niter)) |

2946 | continue; |

2947 | |

2948 | /* Also if the actual exit may be infinite, while the old one |

2949 | not, prefer the old one. */ |

2950 | if (act.infinite && !desc->infinite) |

2951 | continue; |

2952 | } |

2953 | |

2954 | *desc = act; |

2955 | } |

2956 | } |

2957 | |

2958 | if (dump_file) |

2959 | { |

2960 | if (desc->simple_p) |

2961 | { |

2962 | fprintf (dump_file, "Loop %d is simple:\n", loop->num); |

2963 | fprintf (dump_file, " simple exit %d -> %d\n", |

2964 | desc->out_edge->src->index, |

2965 | desc->out_edge->dest->index); |

2966 | if (desc->assumptions) |

2967 | { |

2968 | fprintf (dump_file, " assumptions: "); |

2969 | print_rtl (dump_file, desc->assumptions); |

2970 | fprintf (dump_file, "\n"); |

2971 | } |

2972 | if (desc->noloop_assumptions) |

2973 | { |

2974 | fprintf (dump_file, " does not roll if: "); |

2975 | print_rtl (dump_file, desc->noloop_assumptions); |

2976 | fprintf (dump_file, "\n"); |

2977 | } |

2978 | if (desc->infinite) |

2979 | { |

2980 | fprintf (dump_file, " infinite if: "); |

2981 | print_rtl (dump_file, desc->infinite); |

2982 | fprintf (dump_file, "\n"); |

2983 | } |

2984 | |

2985 | fprintf (dump_file, " number of iterations: "); |

2986 | print_rtl (dump_file, desc->niter_expr); |

2987 | fprintf (dump_file, "\n"); |

2988 | |

2989 | fprintf (dump_file, " upper bound: %li\n", |

2990 | (long)get_max_loop_iterations_int (loop)); |

2991 | fprintf (dump_file, " likely upper bound: %li\n", |

2992 | (long)get_likely_max_loop_iterations_int (loop)); |

2993 | fprintf (dump_file, " realistic bound: %li\n", |

2994 | (long)get_estimated_loop_iterations_int (loop)); |

2995 | } |

2996 | else |

2997 | fprintf (dump_file, "Loop %d is not simple.\n", loop->num); |

2998 | } |

2999 | |

3000 | free (body); |

3001 | } |

3002 | |

3003 | /* Creates a simple loop description of LOOP if it was not computed |

3004 | already. */ |

3005 | |

3006 | struct niter_desc * |

3007 | get_simple_loop_desc (struct loop *loop) |

3008 | { |

3009 | struct niter_desc *desc = simple_loop_desc (loop); |

3010 | |

3011 | if (desc) |

3012 | return desc; |

3013 | |

3014 | /* At least desc->infinite is not always initialized by |

3015 | find_simple_loop_exit. */ |

3016 | desc = ggc_cleared_alloc<niter_desc> (); |

3017 | iv_analysis_loop_init (loop); |

3018 | find_simple_exit (loop, desc); |

3019 | loop->simple_loop_desc = desc; |

3020 | return desc; |

3021 | } |

3022 | |

3023 | /* Releases simple loop description for LOOP. */ |

3024 | |

3025 | void |

3026 | free_simple_loop_desc (struct loop *loop) |

3027 | { |

3028 | struct niter_desc *desc = simple_loop_desc (loop); |

3029 | |

3030 | if (!desc) |

3031 | return; |

3032 | |

3033 | ggc_free (desc); |

3034 | loop->simple_loop_desc = NULL; |

3035 | } |

3036 |