1238438Sdteske/* Support routines for Value Range Propagation (VRP). 2238438Sdteske Copyright (C) 2005-2015 Free Software Foundation, Inc. 3238438Sdteske Contributed by Diego Novillo <dnovillo@redhat.com>. 4238438Sdteske 5238438SdteskeThis file is part of GCC. 6238438Sdteske 7238438SdteskeGCC is free software; you can redistribute it and/or modify 8238438Sdteskeit under the terms of the GNU General Public License as published by 9238438Sdteskethe Free Software Foundation; either version 3, or (at your option) 10238438Sdteskeany later version. 11238438Sdteske 12238438SdteskeGCC is distributed in the hope that it will be useful, 13238438Sdteskebut WITHOUT ANY WARRANTY; without even the implied warranty of 14238438SdteskeMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15238438SdteskeGNU General Public License for more details. 16238438Sdteske 17238438SdteskeYou should have received a copy of the GNU General Public License 18238438Sdteskealong with GCC; see the file COPYING3. If not see 19238438Sdteske<http://www.gnu.org/licenses/>. */ 20238438Sdteske 21238438Sdteske#include "config.h" 22238438Sdteske#include "system.h" 23238438Sdteske#include "coretypes.h" 24238438Sdteske#include "tm.h" 25238438Sdteske#include "flags.h" 26238438Sdteske#include "hash-set.h" 27238438Sdteske#include "machmode.h" 28238438Sdteske#include "vec.h" 29251906Sdteske#include "double-int.h" 30251906Sdteske#include "input.h" 31238438Sdteske#include "alias.h" 32251906Sdteske#include "symtab.h" 33238438Sdteske#include "wide-int.h" 34251906Sdteske#include "inchash.h" 35251906Sdteske#include "tree.h" 36251906Sdteske#include "fold-const.h" 37238438Sdteske#include "stor-layout.h" 38251906Sdteske#include "calls.h" 39238438Sdteske#include "predict.h" 40251906Sdteske#include "hard-reg-set.h" 41251906Sdteske#include "function.h" 42251906Sdteske#include "dominance.h" 43251906Sdteske#include "cfg.h" 44251906Sdteske#include "cfganal.h" 45251906Sdteske#include "basic-block.h" 46251906Sdteske#include "tree-ssa-alias.h" 47251906Sdteske#include "internal-fn.h" 48238438Sdteske#include "gimple-fold.h" 49238438Sdteske#include "tree-eh.h" 50238438Sdteske#include "gimple-expr.h" 51238438Sdteske#include "is-a.h" 52238438Sdteske#include "gimple.h" 53238438Sdteske#include "gimple-iterator.h" 54238438Sdteske#include "gimple-walk.h" 55238438Sdteske#include "gimple-ssa.h" 56251906Sdteske#include "tree-cfg.h" 57238438Sdteske#include "tree-phinodes.h" 58251906Sdteske#include "ssa-iterators.h" 59238438Sdteske#include "stringpool.h" 60251906Sdteske#include "tree-ssanames.h" 61251906Sdteske#include "tree-ssa-loop-manip.h" 62251906Sdteske#include "tree-ssa-loop-niter.h" 63251906Sdteske#include "tree-ssa-loop.h" 64238438Sdteske#include "tree-into-ssa.h" 65#include "tree-ssa.h" 66#include "tree-pass.h" 67#include "tree-dump.h" 68#include "gimple-pretty-print.h" 69#include "diagnostic-core.h" 70#include "intl.h" 71#include "cfgloop.h" 72#include "tree-scalar-evolution.h" 73#include "tree-ssa-propagate.h" 74#include "tree-chrec.h" 75#include "tree-ssa-threadupdate.h" 76#include "hashtab.h" 77#include "rtl.h" 78#include "statistics.h" 79#include "real.h" 80#include "fixed-value.h" 81#include "insn-config.h" 82#include "expmed.h" 83#include "dojump.h" 84#include "explow.h" 85#include "emit-rtl.h" 86#include "varasm.h" 87#include "stmt.h" 88#include "expr.h" 89#include "insn-codes.h" 90#include "optabs.h" 91#include "tree-ssa-threadedge.h" 92 93 94 95/* Range of values that can be associated with an SSA_NAME after VRP 96 has executed. */ 97struct value_range_d 98{ 99 /* Lattice value represented by this range. */ 100 enum value_range_type type; 101 102 /* Minimum and maximum values represented by this range. These 103 values should be interpreted as follows: 104 105 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must 106 be NULL. 107 108 - If TYPE == VR_RANGE then MIN holds the minimum value and 109 MAX holds the maximum value of the range [MIN, MAX]. 110 111 - If TYPE == ANTI_RANGE the variable is known to NOT 112 take any values in the range [MIN, MAX]. */ 113 tree min; 114 tree max; 115 116 /* Set of SSA names whose value ranges are equivalent to this one. 117 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */ 118 bitmap equiv; 119}; 120 121typedef struct value_range_d value_range_t; 122 123#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL } 124 125/* Set of SSA names found live during the RPO traversal of the function 126 for still active basic-blocks. */ 127static sbitmap *live; 128 129/* Return true if the SSA name NAME is live on the edge E. */ 130 131static bool 132live_on_edge (edge e, tree name) 133{ 134 return (live[e->dest->index] 135 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); 136} 137 138/* Local functions. */ 139static int compare_values (tree val1, tree val2); 140static int compare_values_warnv (tree val1, tree val2, bool *); 141static void vrp_meet (value_range_t *, value_range_t *); 142static void vrp_intersect_ranges (value_range_t *, value_range_t *); 143static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, 144 tree, tree, bool, bool *, 145 bool *); 146 147/* Location information for ASSERT_EXPRs. Each instance of this 148 structure describes an ASSERT_EXPR for an SSA name. Since a single 149 SSA name may have more than one assertion associated with it, these 150 locations are kept in a linked list attached to the corresponding 151 SSA name. */ 152struct assert_locus_d 153{ 154 /* Basic block where the assertion would be inserted. */ 155 basic_block bb; 156 157 /* Some assertions need to be inserted on an edge (e.g., assertions 158 generated by COND_EXPRs). In those cases, BB will be NULL. */ 159 edge e; 160 161 /* Pointer to the statement that generated this assertion. */ 162 gimple_stmt_iterator si; 163 164 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ 165 enum tree_code comp_code; 166 167 /* Value being compared against. */ 168 tree val; 169 170 /* Expression to compare. */ 171 tree expr; 172 173 /* Next node in the linked list. */ 174 struct assert_locus_d *next; 175}; 176 177typedef struct assert_locus_d *assert_locus_t; 178 179/* If bit I is present, it means that SSA name N_i has a list of 180 assertions that should be inserted in the IL. */ 181static bitmap need_assert_for; 182 183/* Array of locations lists where to insert assertions. ASSERTS_FOR[I] 184 holds a list of ASSERT_LOCUS_T nodes that describe where 185 ASSERT_EXPRs for SSA name N_I should be inserted. */ 186static assert_locus_t *asserts_for; 187 188/* Value range array. After propagation, VR_VALUE[I] holds the range 189 of values that SSA name N_I may take. */ 190static unsigned num_vr_values; 191static value_range_t **vr_value; 192static bool values_propagated; 193 194/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the 195 number of executable edges we saw the last time we visited the 196 node. */ 197static int *vr_phi_edge_counts; 198 199typedef struct { 200 gswitch *stmt; 201 tree vec; 202} switch_update; 203 204static vec<edge> to_remove_edges; 205static vec<switch_update> to_update_switch_stmts; 206 207 208/* Return the maximum value for TYPE. */ 209 210static inline tree 211vrp_val_max (const_tree type) 212{ 213 if (!INTEGRAL_TYPE_P (type)) 214 return NULL_TREE; 215 216 return TYPE_MAX_VALUE (type); 217} 218 219/* Return the minimum value for TYPE. */ 220 221static inline tree 222vrp_val_min (const_tree type) 223{ 224 if (!INTEGRAL_TYPE_P (type)) 225 return NULL_TREE; 226 227 return TYPE_MIN_VALUE (type); 228} 229 230/* Return whether VAL is equal to the maximum value of its type. This 231 will be true for a positive overflow infinity. We can't do a 232 simple equality comparison with TYPE_MAX_VALUE because C typedefs 233 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == 234 to the integer constant with the same value in the type. */ 235 236static inline bool 237vrp_val_is_max (const_tree val) 238{ 239 tree type_max = vrp_val_max (TREE_TYPE (val)); 240 return (val == type_max 241 || (type_max != NULL_TREE 242 && operand_equal_p (val, type_max, 0))); 243} 244 245/* Return whether VAL is equal to the minimum value of its type. This 246 will be true for a negative overflow infinity. */ 247 248static inline bool 249vrp_val_is_min (const_tree val) 250{ 251 tree type_min = vrp_val_min (TREE_TYPE (val)); 252 return (val == type_min 253 || (type_min != NULL_TREE 254 && operand_equal_p (val, type_min, 0))); 255} 256 257 258/* Return whether TYPE should use an overflow infinity distinct from 259 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to 260 represent a signed overflow during VRP computations. An infinity 261 is distinct from a half-range, which will go from some number to 262 TYPE_{MIN,MAX}_VALUE. */ 263 264static inline bool 265needs_overflow_infinity (const_tree type) 266{ 267 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); 268} 269 270/* Return whether TYPE can support our overflow infinity 271 representation: we use the TREE_OVERFLOW flag, which only exists 272 for constants. If TYPE doesn't support this, we don't optimize 273 cases which would require signed overflow--we drop them to 274 VARYING. */ 275 276static inline bool 277supports_overflow_infinity (const_tree type) 278{ 279 tree min = vrp_val_min (type), max = vrp_val_max (type); 280#ifdef ENABLE_CHECKING 281 gcc_assert (needs_overflow_infinity (type)); 282#endif 283 return (min != NULL_TREE 284 && CONSTANT_CLASS_P (min) 285 && max != NULL_TREE 286 && CONSTANT_CLASS_P (max)); 287} 288 289/* VAL is the maximum or minimum value of a type. Return a 290 corresponding overflow infinity. */ 291 292static inline tree 293make_overflow_infinity (tree val) 294{ 295 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); 296 val = copy_node (val); 297 TREE_OVERFLOW (val) = 1; 298 return val; 299} 300 301/* Return a negative overflow infinity for TYPE. */ 302 303static inline tree 304negative_overflow_infinity (tree type) 305{ 306 gcc_checking_assert (supports_overflow_infinity (type)); 307 return make_overflow_infinity (vrp_val_min (type)); 308} 309 310/* Return a positive overflow infinity for TYPE. */ 311 312static inline tree 313positive_overflow_infinity (tree type) 314{ 315 gcc_checking_assert (supports_overflow_infinity (type)); 316 return make_overflow_infinity (vrp_val_max (type)); 317} 318 319/* Return whether VAL is a negative overflow infinity. */ 320 321static inline bool 322is_negative_overflow_infinity (const_tree val) 323{ 324 return (TREE_OVERFLOW_P (val) 325 && needs_overflow_infinity (TREE_TYPE (val)) 326 && vrp_val_is_min (val)); 327} 328 329/* Return whether VAL is a positive overflow infinity. */ 330 331static inline bool 332is_positive_overflow_infinity (const_tree val) 333{ 334 return (TREE_OVERFLOW_P (val) 335 && needs_overflow_infinity (TREE_TYPE (val)) 336 && vrp_val_is_max (val)); 337} 338 339/* Return whether VAL is a positive or negative overflow infinity. */ 340 341static inline bool 342is_overflow_infinity (const_tree val) 343{ 344 return (TREE_OVERFLOW_P (val) 345 && needs_overflow_infinity (TREE_TYPE (val)) 346 && (vrp_val_is_min (val) || vrp_val_is_max (val))); 347} 348 349/* Return whether STMT has a constant rhs that is_overflow_infinity. */ 350 351static inline bool 352stmt_overflow_infinity (gimple stmt) 353{ 354 if (is_gimple_assign (stmt) 355 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == 356 GIMPLE_SINGLE_RHS) 357 return is_overflow_infinity (gimple_assign_rhs1 (stmt)); 358 return false; 359} 360 361/* If VAL is now an overflow infinity, return VAL. Otherwise, return 362 the same value with TREE_OVERFLOW clear. This can be used to avoid 363 confusing a regular value with an overflow value. */ 364 365static inline tree 366avoid_overflow_infinity (tree val) 367{ 368 if (!is_overflow_infinity (val)) 369 return val; 370 371 if (vrp_val_is_max (val)) 372 return vrp_val_max (TREE_TYPE (val)); 373 else 374 { 375 gcc_checking_assert (vrp_val_is_min (val)); 376 return vrp_val_min (TREE_TYPE (val)); 377 } 378} 379 380 381/* Return true if ARG is marked with the nonnull attribute in the 382 current function signature. */ 383 384static bool 385nonnull_arg_p (const_tree arg) 386{ 387 tree t, attrs, fntype; 388 unsigned HOST_WIDE_INT arg_num; 389 390 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg))); 391 392 /* The static chain decl is always non null. */ 393 if (arg == cfun->static_chain_decl) 394 return true; 395 396 fntype = TREE_TYPE (current_function_decl); 397 for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs)) 398 { 399 attrs = lookup_attribute ("nonnull", attrs); 400 401 /* If "nonnull" wasn't specified, we know nothing about the argument. */ 402 if (attrs == NULL_TREE) 403 return false; 404 405 /* If "nonnull" applies to all the arguments, then ARG is non-null. */ 406 if (TREE_VALUE (attrs) == NULL_TREE) 407 return true; 408 409 /* Get the position number for ARG in the function signature. */ 410 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl); 411 t; 412 t = DECL_CHAIN (t), arg_num++) 413 { 414 if (t == arg) 415 break; 416 } 417 418 gcc_assert (t == arg); 419 420 /* Now see if ARG_NUM is mentioned in the nonnull list. */ 421 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) 422 { 423 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0) 424 return true; 425 } 426 } 427 428 return false; 429} 430 431 432/* Set value range VR to VR_UNDEFINED. */ 433 434static inline void 435set_value_range_to_undefined (value_range_t *vr) 436{ 437 vr->type = VR_UNDEFINED; 438 vr->min = vr->max = NULL_TREE; 439 if (vr->equiv) 440 bitmap_clear (vr->equiv); 441} 442 443 444/* Set value range VR to VR_VARYING. */ 445 446static inline void 447set_value_range_to_varying (value_range_t *vr) 448{ 449 vr->type = VR_VARYING; 450 vr->min = vr->max = NULL_TREE; 451 if (vr->equiv) 452 bitmap_clear (vr->equiv); 453} 454 455 456/* Set value range VR to {T, MIN, MAX, EQUIV}. */ 457 458static void 459set_value_range (value_range_t *vr, enum value_range_type t, tree min, 460 tree max, bitmap equiv) 461{ 462#if defined ENABLE_CHECKING 463 /* Check the validity of the range. */ 464 if (t == VR_RANGE || t == VR_ANTI_RANGE) 465 { 466 int cmp; 467 468 gcc_assert (min && max); 469 470 gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min)) 471 && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max))); 472 473 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) 474 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); 475 476 cmp = compare_values (min, max); 477 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); 478 479 if (needs_overflow_infinity (TREE_TYPE (min))) 480 gcc_assert (!is_overflow_infinity (min) 481 || !is_overflow_infinity (max)); 482 } 483 484 if (t == VR_UNDEFINED || t == VR_VARYING) 485 gcc_assert (min == NULL_TREE && max == NULL_TREE); 486 487 if (t == VR_UNDEFINED || t == VR_VARYING) 488 gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); 489#endif 490 491 vr->type = t; 492 vr->min = min; 493 vr->max = max; 494 495 /* Since updating the equivalence set involves deep copying the 496 bitmaps, only do it if absolutely necessary. */ 497 if (vr->equiv == NULL 498 && equiv != NULL) 499 vr->equiv = BITMAP_ALLOC (NULL); 500 501 if (equiv != vr->equiv) 502 { 503 if (equiv && !bitmap_empty_p (equiv)) 504 bitmap_copy (vr->equiv, equiv); 505 else 506 bitmap_clear (vr->equiv); 507 } 508} 509 510 511/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. 512 This means adjusting T, MIN and MAX representing the case of a 513 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] 514 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. 515 In corner cases where MAX+1 or MIN-1 wraps this will fall back 516 to varying. 517 This routine exists to ease canonicalization in the case where we 518 extract ranges from var + CST op limit. */ 519 520static void 521set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t, 522 tree min, tree max, bitmap equiv) 523{ 524 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ 525 if (t == VR_UNDEFINED) 526 { 527 set_value_range_to_undefined (vr); 528 return; 529 } 530 else if (t == VR_VARYING) 531 { 532 set_value_range_to_varying (vr); 533 return; 534 } 535 536 /* Nothing to canonicalize for symbolic ranges. */ 537 if (TREE_CODE (min) != INTEGER_CST 538 || TREE_CODE (max) != INTEGER_CST) 539 { 540 set_value_range (vr, t, min, max, equiv); 541 return; 542 } 543 544 /* Wrong order for min and max, to swap them and the VR type we need 545 to adjust them. */ 546 if (tree_int_cst_lt (max, min)) 547 { 548 tree one, tmp; 549 550 /* For one bit precision if max < min, then the swapped 551 range covers all values, so for VR_RANGE it is varying and 552 for VR_ANTI_RANGE empty range, so drop to varying as well. */ 553 if (TYPE_PRECISION (TREE_TYPE (min)) == 1) 554 { 555 set_value_range_to_varying (vr); 556 return; 557 } 558 559 one = build_int_cst (TREE_TYPE (min), 1); 560 tmp = int_const_binop (PLUS_EXPR, max, one); 561 max = int_const_binop (MINUS_EXPR, min, one); 562 min = tmp; 563 564 /* There's one corner case, if we had [C+1, C] before we now have 565 that again. But this represents an empty value range, so drop 566 to varying in this case. */ 567 if (tree_int_cst_lt (max, min)) 568 { 569 set_value_range_to_varying (vr); 570 return; 571 } 572 573 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; 574 } 575 576 /* Anti-ranges that can be represented as ranges should be so. */ 577 if (t == VR_ANTI_RANGE) 578 { 579 bool is_min = vrp_val_is_min (min); 580 bool is_max = vrp_val_is_max (max); 581 582 if (is_min && is_max) 583 { 584 /* We cannot deal with empty ranges, drop to varying. 585 ??? This could be VR_UNDEFINED instead. */ 586 set_value_range_to_varying (vr); 587 return; 588 } 589 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 590 && (is_min || is_max)) 591 { 592 /* Non-empty boolean ranges can always be represented 593 as a singleton range. */ 594 if (is_min) 595 min = max = vrp_val_max (TREE_TYPE (min)); 596 else 597 min = max = vrp_val_min (TREE_TYPE (min)); 598 t = VR_RANGE; 599 } 600 else if (is_min 601 /* As a special exception preserve non-null ranges. */ 602 && !(TYPE_UNSIGNED (TREE_TYPE (min)) 603 && integer_zerop (max))) 604 { 605 tree one = build_int_cst (TREE_TYPE (max), 1); 606 min = int_const_binop (PLUS_EXPR, max, one); 607 max = vrp_val_max (TREE_TYPE (max)); 608 t = VR_RANGE; 609 } 610 else if (is_max) 611 { 612 tree one = build_int_cst (TREE_TYPE (min), 1); 613 max = int_const_binop (MINUS_EXPR, min, one); 614 min = vrp_val_min (TREE_TYPE (min)); 615 t = VR_RANGE; 616 } 617 } 618 619 /* Drop [-INF(OVF), +INF(OVF)] to varying. */ 620 if (needs_overflow_infinity (TREE_TYPE (min)) 621 && is_overflow_infinity (min) 622 && is_overflow_infinity (max)) 623 { 624 set_value_range_to_varying (vr); 625 return; 626 } 627 628 set_value_range (vr, t, min, max, equiv); 629} 630 631/* Copy value range FROM into value range TO. */ 632 633static inline void 634copy_value_range (value_range_t *to, value_range_t *from) 635{ 636 set_value_range (to, from->type, from->min, from->max, from->equiv); 637} 638 639/* Set value range VR to a single value. This function is only called 640 with values we get from statements, and exists to clear the 641 TREE_OVERFLOW flag so that we don't think we have an overflow 642 infinity when we shouldn't. */ 643 644static inline void 645set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) 646{ 647 gcc_assert (is_gimple_min_invariant (val)); 648 if (TREE_OVERFLOW_P (val)) 649 val = drop_tree_overflow (val); 650 set_value_range (vr, VR_RANGE, val, val, equiv); 651} 652 653/* Set value range VR to a non-negative range of type TYPE. 654 OVERFLOW_INFINITY indicates whether to use an overflow infinity 655 rather than TYPE_MAX_VALUE; this should be true if we determine 656 that the range is nonnegative based on the assumption that signed 657 overflow does not occur. */ 658 659static inline void 660set_value_range_to_nonnegative (value_range_t *vr, tree type, 661 bool overflow_infinity) 662{ 663 tree zero; 664 665 if (overflow_infinity && !supports_overflow_infinity (type)) 666 { 667 set_value_range_to_varying (vr); 668 return; 669 } 670 671 zero = build_int_cst (type, 0); 672 set_value_range (vr, VR_RANGE, zero, 673 (overflow_infinity 674 ? positive_overflow_infinity (type) 675 : TYPE_MAX_VALUE (type)), 676 vr->equiv); 677} 678 679/* Set value range VR to a non-NULL range of type TYPE. */ 680 681static inline void 682set_value_range_to_nonnull (value_range_t *vr, tree type) 683{ 684 tree zero = build_int_cst (type, 0); 685 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); 686} 687 688 689/* Set value range VR to a NULL range of type TYPE. */ 690 691static inline void 692set_value_range_to_null (value_range_t *vr, tree type) 693{ 694 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); 695} 696 697 698/* Set value range VR to a range of a truthvalue of type TYPE. */ 699 700static inline void 701set_value_range_to_truthvalue (value_range_t *vr, tree type) 702{ 703 if (TYPE_PRECISION (type) == 1) 704 set_value_range_to_varying (vr); 705 else 706 set_value_range (vr, VR_RANGE, 707 build_int_cst (type, 0), build_int_cst (type, 1), 708 vr->equiv); 709} 710 711 712/* If abs (min) < abs (max), set VR to [-max, max], if 713 abs (min) >= abs (max), set VR to [-min, min]. */ 714 715static void 716abs_extent_range (value_range_t *vr, tree min, tree max) 717{ 718 int cmp; 719 720 gcc_assert (TREE_CODE (min) == INTEGER_CST); 721 gcc_assert (TREE_CODE (max) == INTEGER_CST); 722 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); 723 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); 724 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); 725 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); 726 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) 727 { 728 set_value_range_to_varying (vr); 729 return; 730 } 731 cmp = compare_values (min, max); 732 if (cmp == -1) 733 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); 734 else if (cmp == 0 || cmp == 1) 735 { 736 max = min; 737 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); 738 } 739 else 740 { 741 set_value_range_to_varying (vr); 742 return; 743 } 744 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); 745} 746 747 748/* Return value range information for VAR. 749 750 If we have no values ranges recorded (ie, VRP is not running), then 751 return NULL. Otherwise create an empty range if none existed for VAR. */ 752 753static value_range_t * 754get_value_range (const_tree var) 755{ 756 static const struct value_range_d vr_const_varying 757 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL }; 758 value_range_t *vr; 759 tree sym; 760 unsigned ver = SSA_NAME_VERSION (var); 761 762 /* If we have no recorded ranges, then return NULL. */ 763 if (! vr_value) 764 return NULL; 765 766 /* If we query the range for a new SSA name return an unmodifiable VARYING. 767 We should get here at most from the substitute-and-fold stage which 768 will never try to change values. */ 769 if (ver >= num_vr_values) 770 return CONST_CAST (value_range_t *, &vr_const_varying); 771 772 vr = vr_value[ver]; 773 if (vr) 774 return vr; 775 776 /* After propagation finished do not allocate new value-ranges. */ 777 if (values_propagated) 778 return CONST_CAST (value_range_t *, &vr_const_varying); 779 780 /* Create a default value range. */ 781 vr_value[ver] = vr = XCNEW (value_range_t); 782 783 /* Defer allocating the equivalence set. */ 784 vr->equiv = NULL; 785 786 /* If VAR is a default definition of a parameter, the variable can 787 take any value in VAR's type. */ 788 if (SSA_NAME_IS_DEFAULT_DEF (var)) 789 { 790 sym = SSA_NAME_VAR (var); 791 if (TREE_CODE (sym) == PARM_DECL) 792 { 793 /* Try to use the "nonnull" attribute to create ~[0, 0] 794 anti-ranges for pointers. Note that this is only valid with 795 default definitions of PARM_DECLs. */ 796 if (POINTER_TYPE_P (TREE_TYPE (sym)) 797 && nonnull_arg_p (sym)) 798 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); 799 else 800 set_value_range_to_varying (vr); 801 } 802 else if (TREE_CODE (sym) == RESULT_DECL 803 && DECL_BY_REFERENCE (sym)) 804 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); 805 } 806 807 return vr; 808} 809 810/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ 811 812static inline bool 813vrp_operand_equal_p (const_tree val1, const_tree val2) 814{ 815 if (val1 == val2) 816 return true; 817 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) 818 return false; 819 return is_overflow_infinity (val1) == is_overflow_infinity (val2); 820} 821 822/* Return true, if the bitmaps B1 and B2 are equal. */ 823 824static inline bool 825vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) 826{ 827 return (b1 == b2 828 || ((!b1 || bitmap_empty_p (b1)) 829 && (!b2 || bitmap_empty_p (b2))) 830 || (b1 && b2 831 && bitmap_equal_p (b1, b2))); 832} 833 834/* Update the value range and equivalence set for variable VAR to 835 NEW_VR. Return true if NEW_VR is different from VAR's previous 836 value. 837 838 NOTE: This function assumes that NEW_VR is a temporary value range 839 object created for the sole purpose of updating VAR's range. The 840 storage used by the equivalence set from NEW_VR will be freed by 841 this function. Do not call update_value_range when NEW_VR 842 is the range object associated with another SSA name. */ 843 844static inline bool 845update_value_range (const_tree var, value_range_t *new_vr) 846{ 847 value_range_t *old_vr; 848 bool is_new; 849 850 /* If there is a value-range on the SSA name from earlier analysis 851 factor that in. */ 852 if (INTEGRAL_TYPE_P (TREE_TYPE (var))) 853 { 854 wide_int min, max; 855 value_range_type rtype = get_range_info (var, &min, &max); 856 if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE) 857 { 858 value_range_d nr; 859 nr.type = rtype; 860 nr.min = wide_int_to_tree (TREE_TYPE (var), min); 861 nr.max = wide_int_to_tree (TREE_TYPE (var), max); 862 nr.equiv = NULL; 863 vrp_intersect_ranges (new_vr, &nr); 864 } 865 } 866 867 /* Update the value range, if necessary. */ 868 old_vr = get_value_range (var); 869 is_new = old_vr->type != new_vr->type 870 || !vrp_operand_equal_p (old_vr->min, new_vr->min) 871 || !vrp_operand_equal_p (old_vr->max, new_vr->max) 872 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv); 873 874 if (is_new) 875 { 876 /* Do not allow transitions up the lattice. The following 877 is slightly more awkward than just new_vr->type < old_vr->type 878 because VR_RANGE and VR_ANTI_RANGE need to be considered 879 the same. We may not have is_new when transitioning to 880 UNDEFINED. If old_vr->type is VARYING, we shouldn't be 881 called. */ 882 if (new_vr->type == VR_UNDEFINED) 883 { 884 BITMAP_FREE (new_vr->equiv); 885 set_value_range_to_varying (old_vr); 886 set_value_range_to_varying (new_vr); 887 return true; 888 } 889 else 890 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max, 891 new_vr->equiv); 892 } 893 894 BITMAP_FREE (new_vr->equiv); 895 896 return is_new; 897} 898 899 900/* Add VAR and VAR's equivalence set to EQUIV. This is the central 901 point where equivalence processing can be turned on/off. */ 902 903static void 904add_equivalence (bitmap *equiv, const_tree var) 905{ 906 unsigned ver = SSA_NAME_VERSION (var); 907 value_range_t *vr = vr_value[ver]; 908 909 if (*equiv == NULL) 910 *equiv = BITMAP_ALLOC (NULL); 911 bitmap_set_bit (*equiv, ver); 912 if (vr && vr->equiv) 913 bitmap_ior_into (*equiv, vr->equiv); 914} 915 916 917/* Return true if VR is ~[0, 0]. */ 918 919static inline bool 920range_is_nonnull (value_range_t *vr) 921{ 922 return vr->type == VR_ANTI_RANGE 923 && integer_zerop (vr->min) 924 && integer_zerop (vr->max); 925} 926 927 928/* Return true if VR is [0, 0]. */ 929 930static inline bool 931range_is_null (value_range_t *vr) 932{ 933 return vr->type == VR_RANGE 934 && integer_zerop (vr->min) 935 && integer_zerop (vr->max); 936} 937 938/* Return true if max and min of VR are INTEGER_CST. It's not necessary 939 a singleton. */ 940 941static inline bool 942range_int_cst_p (value_range_t *vr) 943{ 944 return (vr->type == VR_RANGE 945 && TREE_CODE (vr->max) == INTEGER_CST 946 && TREE_CODE (vr->min) == INTEGER_CST); 947} 948 949/* Return true if VR is a INTEGER_CST singleton. */ 950 951static inline bool 952range_int_cst_singleton_p (value_range_t *vr) 953{ 954 return (range_int_cst_p (vr) 955 && !is_overflow_infinity (vr->min) 956 && !is_overflow_infinity (vr->max) 957 && tree_int_cst_equal (vr->min, vr->max)); 958} 959 960/* Return true if value range VR involves at least one symbol. */ 961 962static inline bool 963symbolic_range_p (value_range_t *vr) 964{ 965 return (!is_gimple_min_invariant (vr->min) 966 || !is_gimple_min_invariant (vr->max)); 967} 968 969/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE 970 otherwise. We only handle additive operations and set NEG to true if the 971 symbol is negated and INV to the invariant part, if any. */ 972 973static tree 974get_single_symbol (tree t, bool *neg, tree *inv) 975{ 976 bool neg_; 977 tree inv_; 978 979 if (TREE_CODE (t) == PLUS_EXPR 980 || TREE_CODE (t) == POINTER_PLUS_EXPR 981 || TREE_CODE (t) == MINUS_EXPR) 982 { 983 if (is_gimple_min_invariant (TREE_OPERAND (t, 0))) 984 { 985 neg_ = (TREE_CODE (t) == MINUS_EXPR); 986 inv_ = TREE_OPERAND (t, 0); 987 t = TREE_OPERAND (t, 1); 988 } 989 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1))) 990 { 991 neg_ = false; 992 inv_ = TREE_OPERAND (t, 1); 993 t = TREE_OPERAND (t, 0); 994 } 995 else 996 return NULL_TREE; 997 } 998 else 999 { 1000 neg_ = false; 1001 inv_ = NULL_TREE; 1002 } 1003 1004 if (TREE_CODE (t) == NEGATE_EXPR) 1005 { 1006 t = TREE_OPERAND (t, 0); 1007 neg_ = !neg_; 1008 } 1009 1010 if (TREE_CODE (t) != SSA_NAME) 1011 return NULL_TREE; 1012 1013 *neg = neg_; 1014 *inv = inv_; 1015 return t; 1016} 1017 1018/* The reverse operation: build a symbolic expression with TYPE 1019 from symbol SYM, negated according to NEG, and invariant INV. */ 1020 1021static tree 1022build_symbolic_expr (tree type, tree sym, bool neg, tree inv) 1023{ 1024 const bool pointer_p = POINTER_TYPE_P (type); 1025 tree t = sym; 1026 1027 if (neg) 1028 t = build1 (NEGATE_EXPR, type, t); 1029 1030 if (integer_zerop (inv)) 1031 return t; 1032 1033 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); 1034} 1035 1036/* Return true if value range VR involves exactly one symbol SYM. */ 1037 1038static bool 1039symbolic_range_based_on_p (value_range_t *vr, const_tree sym) 1040{ 1041 bool neg, min_has_symbol, max_has_symbol; 1042 tree inv; 1043 1044 if (is_gimple_min_invariant (vr->min)) 1045 min_has_symbol = false; 1046 else if (get_single_symbol (vr->min, &neg, &inv) == sym) 1047 min_has_symbol = true; 1048 else 1049 return false; 1050 1051 if (is_gimple_min_invariant (vr->max)) 1052 max_has_symbol = false; 1053 else if (get_single_symbol (vr->max, &neg, &inv) == sym) 1054 max_has_symbol = true; 1055 else 1056 return false; 1057 1058 return (min_has_symbol || max_has_symbol); 1059} 1060 1061/* Return true if value range VR uses an overflow infinity. */ 1062 1063static inline bool 1064overflow_infinity_range_p (value_range_t *vr) 1065{ 1066 return (vr->type == VR_RANGE 1067 && (is_overflow_infinity (vr->min) 1068 || is_overflow_infinity (vr->max))); 1069} 1070 1071/* Return false if we can not make a valid comparison based on VR; 1072 this will be the case if it uses an overflow infinity and overflow 1073 is not undefined (i.e., -fno-strict-overflow is in effect). 1074 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR 1075 uses an overflow infinity. */ 1076 1077static bool 1078usable_range_p (value_range_t *vr, bool *strict_overflow_p) 1079{ 1080 gcc_assert (vr->type == VR_RANGE); 1081 if (is_overflow_infinity (vr->min)) 1082 { 1083 *strict_overflow_p = true; 1084 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) 1085 return false; 1086 } 1087 if (is_overflow_infinity (vr->max)) 1088 { 1089 *strict_overflow_p = true; 1090 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) 1091 return false; 1092 } 1093 return true; 1094} 1095 1096 1097/* Return true if the result of assignment STMT is know to be non-negative. 1098 If the return value is based on the assumption that signed overflow is 1099 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 1100 *STRICT_OVERFLOW_P.*/ 1101 1102static bool 1103gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) 1104{ 1105 enum tree_code code = gimple_assign_rhs_code (stmt); 1106 switch (get_gimple_rhs_class (code)) 1107 { 1108 case GIMPLE_UNARY_RHS: 1109 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), 1110 gimple_expr_type (stmt), 1111 gimple_assign_rhs1 (stmt), 1112 strict_overflow_p); 1113 case GIMPLE_BINARY_RHS: 1114 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), 1115 gimple_expr_type (stmt), 1116 gimple_assign_rhs1 (stmt), 1117 gimple_assign_rhs2 (stmt), 1118 strict_overflow_p); 1119 case GIMPLE_TERNARY_RHS: 1120 return false; 1121 case GIMPLE_SINGLE_RHS: 1122 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt), 1123 strict_overflow_p); 1124 case GIMPLE_INVALID_RHS: 1125 gcc_unreachable (); 1126 default: 1127 gcc_unreachable (); 1128 } 1129} 1130 1131/* Return true if return value of call STMT is know to be non-negative. 1132 If the return value is based on the assumption that signed overflow is 1133 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 1134 *STRICT_OVERFLOW_P.*/ 1135 1136static bool 1137gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) 1138{ 1139 tree arg0 = gimple_call_num_args (stmt) > 0 ? 1140 gimple_call_arg (stmt, 0) : NULL_TREE; 1141 tree arg1 = gimple_call_num_args (stmt) > 1 ? 1142 gimple_call_arg (stmt, 1) : NULL_TREE; 1143 1144 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt), 1145 gimple_call_fndecl (stmt), 1146 arg0, 1147 arg1, 1148 strict_overflow_p); 1149} 1150 1151/* Return true if STMT is know to to compute a non-negative value. 1152 If the return value is based on the assumption that signed overflow is 1153 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 1154 *STRICT_OVERFLOW_P.*/ 1155 1156static bool 1157gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) 1158{ 1159 switch (gimple_code (stmt)) 1160 { 1161 case GIMPLE_ASSIGN: 1162 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p); 1163 case GIMPLE_CALL: 1164 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p); 1165 default: 1166 gcc_unreachable (); 1167 } 1168} 1169 1170/* Return true if the result of assignment STMT is know to be non-zero. 1171 If the return value is based on the assumption that signed overflow is 1172 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 1173 *STRICT_OVERFLOW_P.*/ 1174 1175static bool 1176gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p) 1177{ 1178 enum tree_code code = gimple_assign_rhs_code (stmt); 1179 switch (get_gimple_rhs_class (code)) 1180 { 1181 case GIMPLE_UNARY_RHS: 1182 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), 1183 gimple_expr_type (stmt), 1184 gimple_assign_rhs1 (stmt), 1185 strict_overflow_p); 1186 case GIMPLE_BINARY_RHS: 1187 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), 1188 gimple_expr_type (stmt), 1189 gimple_assign_rhs1 (stmt), 1190 gimple_assign_rhs2 (stmt), 1191 strict_overflow_p); 1192 case GIMPLE_TERNARY_RHS: 1193 return false; 1194 case GIMPLE_SINGLE_RHS: 1195 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt), 1196 strict_overflow_p); 1197 case GIMPLE_INVALID_RHS: 1198 gcc_unreachable (); 1199 default: 1200 gcc_unreachable (); 1201 } 1202} 1203 1204/* Return true if STMT is known to compute a non-zero value. 1205 If the return value is based on the assumption that signed overflow is 1206 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 1207 *STRICT_OVERFLOW_P.*/ 1208 1209static bool 1210gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p) 1211{ 1212 switch (gimple_code (stmt)) 1213 { 1214 case GIMPLE_ASSIGN: 1215 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p); 1216 case GIMPLE_CALL: 1217 { 1218 tree fndecl = gimple_call_fndecl (stmt); 1219 if (!fndecl) return false; 1220 if (flag_delete_null_pointer_checks && !flag_check_new 1221 && DECL_IS_OPERATOR_NEW (fndecl) 1222 && !TREE_NOTHROW (fndecl)) 1223 return true; 1224 if (flag_delete_null_pointer_checks && 1225 lookup_attribute ("returns_nonnull", 1226 TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))) 1227 return true; 1228 return gimple_alloca_call_p (stmt); 1229 } 1230 default: 1231 gcc_unreachable (); 1232 } 1233} 1234 1235/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges 1236 obtained so far. */ 1237 1238static bool 1239vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p) 1240{ 1241 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p)) 1242 return true; 1243 1244 /* If we have an expression of the form &X->a, then the expression 1245 is nonnull if X is nonnull. */ 1246 if (is_gimple_assign (stmt) 1247 && gimple_assign_rhs_code (stmt) == ADDR_EXPR) 1248 { 1249 tree expr = gimple_assign_rhs1 (stmt); 1250 tree base = get_base_address (TREE_OPERAND (expr, 0)); 1251 1252 if (base != NULL_TREE 1253 && TREE_CODE (base) == MEM_REF 1254 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) 1255 { 1256 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0)); 1257 if (range_is_nonnull (vr)) 1258 return true; 1259 } 1260 } 1261 1262 return false; 1263} 1264 1265/* Returns true if EXPR is a valid value (as expected by compare_values) -- 1266 a gimple invariant, or SSA_NAME +- CST. */ 1267 1268static bool 1269valid_value_p (tree expr) 1270{ 1271 if (TREE_CODE (expr) == SSA_NAME) 1272 return true; 1273 1274 if (TREE_CODE (expr) == PLUS_EXPR 1275 || TREE_CODE (expr) == MINUS_EXPR) 1276 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME 1277 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); 1278 1279 return is_gimple_min_invariant (expr); 1280} 1281 1282/* Return 1283 1 if VAL < VAL2 1284 0 if !(VAL < VAL2) 1285 -2 if those are incomparable. */ 1286static inline int 1287operand_less_p (tree val, tree val2) 1288{ 1289 /* LT is folded faster than GE and others. Inline the common case. */ 1290 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) 1291 return tree_int_cst_lt (val, val2); 1292 else 1293 { 1294 tree tcmp; 1295 1296 fold_defer_overflow_warnings (); 1297 1298 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); 1299 1300 fold_undefer_and_ignore_overflow_warnings (); 1301 1302 if (!tcmp 1303 || TREE_CODE (tcmp) != INTEGER_CST) 1304 return -2; 1305 1306 if (!integer_zerop (tcmp)) 1307 return 1; 1308 } 1309 1310 /* val >= val2, not considering overflow infinity. */ 1311 if (is_negative_overflow_infinity (val)) 1312 return is_negative_overflow_infinity (val2) ? 0 : 1; 1313 else if (is_positive_overflow_infinity (val2)) 1314 return is_positive_overflow_infinity (val) ? 0 : 1; 1315 1316 return 0; 1317} 1318 1319/* Compare two values VAL1 and VAL2. Return 1320 1321 -2 if VAL1 and VAL2 cannot be compared at compile-time, 1322 -1 if VAL1 < VAL2, 1323 0 if VAL1 == VAL2, 1324 +1 if VAL1 > VAL2, and 1325 +2 if VAL1 != VAL2 1326 1327 This is similar to tree_int_cst_compare but supports pointer values 1328 and values that cannot be compared at compile time. 1329 1330 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to 1331 true if the return value is only valid if we assume that signed 1332 overflow is undefined. */ 1333 1334static int 1335compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) 1336{ 1337 if (val1 == val2) 1338 return 0; 1339 1340 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or 1341 both integers. */ 1342 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) 1343 == POINTER_TYPE_P (TREE_TYPE (val2))); 1344 1345 /* Convert the two values into the same type. This is needed because 1346 sizetype causes sign extension even for unsigned types. */ 1347 val2 = fold_convert (TREE_TYPE (val1), val2); 1348 STRIP_USELESS_TYPE_CONVERSION (val2); 1349 1350 if ((TREE_CODE (val1) == SSA_NAME 1351 || (TREE_CODE (val1) == NEGATE_EXPR 1352 && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME) 1353 || TREE_CODE (val1) == PLUS_EXPR 1354 || TREE_CODE (val1) == MINUS_EXPR) 1355 && (TREE_CODE (val2) == SSA_NAME 1356 || (TREE_CODE (val2) == NEGATE_EXPR 1357 && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME) 1358 || TREE_CODE (val2) == PLUS_EXPR 1359 || TREE_CODE (val2) == MINUS_EXPR)) 1360 { 1361 tree n1, c1, n2, c2; 1362 enum tree_code code1, code2; 1363 1364 /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME', 1365 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the 1366 same name, return -2. */ 1367 if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR) 1368 { 1369 code1 = SSA_NAME; 1370 n1 = val1; 1371 c1 = NULL_TREE; 1372 } 1373 else 1374 { 1375 code1 = TREE_CODE (val1); 1376 n1 = TREE_OPERAND (val1, 0); 1377 c1 = TREE_OPERAND (val1, 1); 1378 if (tree_int_cst_sgn (c1) == -1) 1379 { 1380 if (is_negative_overflow_infinity (c1)) 1381 return -2; 1382 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1); 1383 if (!c1) 1384 return -2; 1385 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1386 } 1387 } 1388 1389 if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR) 1390 { 1391 code2 = SSA_NAME; 1392 n2 = val2; 1393 c2 = NULL_TREE; 1394 } 1395 else 1396 { 1397 code2 = TREE_CODE (val2); 1398 n2 = TREE_OPERAND (val2, 0); 1399 c2 = TREE_OPERAND (val2, 1); 1400 if (tree_int_cst_sgn (c2) == -1) 1401 { 1402 if (is_negative_overflow_infinity (c2)) 1403 return -2; 1404 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2); 1405 if (!c2) 1406 return -2; 1407 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1408 } 1409 } 1410 1411 /* Both values must use the same name. */ 1412 if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR) 1413 { 1414 n1 = TREE_OPERAND (n1, 0); 1415 n2 = TREE_OPERAND (n2, 0); 1416 } 1417 if (n1 != n2) 1418 return -2; 1419 1420 if (code1 == SSA_NAME && code2 == SSA_NAME) 1421 /* NAME == NAME */ 1422 return 0; 1423 1424 /* If overflow is defined we cannot simplify more. */ 1425 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) 1426 return -2; 1427 1428 if (strict_overflow_p != NULL 1429 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1)) 1430 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2))) 1431 *strict_overflow_p = true; 1432 1433 if (code1 == SSA_NAME) 1434 { 1435 if (code2 == PLUS_EXPR) 1436 /* NAME < NAME + CST */ 1437 return -1; 1438 else if (code2 == MINUS_EXPR) 1439 /* NAME > NAME - CST */ 1440 return 1; 1441 } 1442 else if (code1 == PLUS_EXPR) 1443 { 1444 if (code2 == SSA_NAME) 1445 /* NAME + CST > NAME */ 1446 return 1; 1447 else if (code2 == PLUS_EXPR) 1448 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */ 1449 return compare_values_warnv (c1, c2, strict_overflow_p); 1450 else if (code2 == MINUS_EXPR) 1451 /* NAME + CST1 > NAME - CST2 */ 1452 return 1; 1453 } 1454 else if (code1 == MINUS_EXPR) 1455 { 1456 if (code2 == SSA_NAME) 1457 /* NAME - CST < NAME */ 1458 return -1; 1459 else if (code2 == PLUS_EXPR) 1460 /* NAME - CST1 < NAME + CST2 */ 1461 return -1; 1462 else if (code2 == MINUS_EXPR) 1463 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that 1464 C1 and C2 are swapped in the call to compare_values. */ 1465 return compare_values_warnv (c2, c1, strict_overflow_p); 1466 } 1467 1468 gcc_unreachable (); 1469 } 1470 1471 /* We cannot compare non-constants. */ 1472 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)) 1473 return -2; 1474 1475 if (!POINTER_TYPE_P (TREE_TYPE (val1))) 1476 { 1477 /* We cannot compare overflowed values, except for overflow 1478 infinities. */ 1479 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) 1480 { 1481 if (strict_overflow_p != NULL) 1482 *strict_overflow_p = true; 1483 if (is_negative_overflow_infinity (val1)) 1484 return is_negative_overflow_infinity (val2) ? 0 : -1; 1485 else if (is_negative_overflow_infinity (val2)) 1486 return 1; 1487 else if (is_positive_overflow_infinity (val1)) 1488 return is_positive_overflow_infinity (val2) ? 0 : 1; 1489 else if (is_positive_overflow_infinity (val2)) 1490 return -1; 1491 return -2; 1492 } 1493 1494 return tree_int_cst_compare (val1, val2); 1495 } 1496 else 1497 { 1498 tree t; 1499 1500 /* First see if VAL1 and VAL2 are not the same. */ 1501 if (val1 == val2 || operand_equal_p (val1, val2, 0)) 1502 return 0; 1503 1504 /* If VAL1 is a lower address than VAL2, return -1. */ 1505 if (operand_less_p (val1, val2) == 1) 1506 return -1; 1507 1508 /* If VAL1 is a higher address than VAL2, return +1. */ 1509 if (operand_less_p (val2, val1) == 1) 1510 return 1; 1511 1512 /* If VAL1 is different than VAL2, return +2. 1513 For integer constants we either have already returned -1 or 1 1514 or they are equivalent. We still might succeed in proving 1515 something about non-trivial operands. */ 1516 if (TREE_CODE (val1) != INTEGER_CST 1517 || TREE_CODE (val2) != INTEGER_CST) 1518 { 1519 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); 1520 if (t && integer_onep (t)) 1521 return 2; 1522 } 1523 1524 return -2; 1525 } 1526} 1527 1528/* Compare values like compare_values_warnv, but treat comparisons of 1529 nonconstants which rely on undefined overflow as incomparable. */ 1530 1531static int 1532compare_values (tree val1, tree val2) 1533{ 1534 bool sop; 1535 int ret; 1536 1537 sop = false; 1538 ret = compare_values_warnv (val1, val2, &sop); 1539 if (sop 1540 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) 1541 ret = -2; 1542 return ret; 1543} 1544 1545 1546/* Return 1 if VAL is inside value range MIN <= VAL <= MAX, 1547 0 if VAL is not inside [MIN, MAX], 1548 -2 if we cannot tell either way. 1549 1550 Benchmark compile/20001226-1.c compilation time after changing this 1551 function. */ 1552 1553static inline int 1554value_inside_range (tree val, tree min, tree max) 1555{ 1556 int cmp1, cmp2; 1557 1558 cmp1 = operand_less_p (val, min); 1559 if (cmp1 == -2) 1560 return -2; 1561 if (cmp1 == 1) 1562 return 0; 1563 1564 cmp2 = operand_less_p (max, val); 1565 if (cmp2 == -2) 1566 return -2; 1567 1568 return !cmp2; 1569} 1570 1571 1572/* Return true if value ranges VR0 and VR1 have a non-empty 1573 intersection. 1574 1575 Benchmark compile/20001226-1.c compilation time after changing this 1576 function. 1577 */ 1578 1579static inline bool 1580value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1) 1581{ 1582 /* The value ranges do not intersect if the maximum of the first range is 1583 less than the minimum of the second range or vice versa. 1584 When those relations are unknown, we can't do any better. */ 1585 if (operand_less_p (vr0->max, vr1->min) != 0) 1586 return false; 1587 if (operand_less_p (vr1->max, vr0->min) != 0) 1588 return false; 1589 return true; 1590} 1591 1592 1593/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not 1594 include the value zero, -2 if we cannot tell. */ 1595 1596static inline int 1597range_includes_zero_p (tree min, tree max) 1598{ 1599 tree zero = build_int_cst (TREE_TYPE (min), 0); 1600 return value_inside_range (zero, min, max); 1601} 1602 1603/* Return true if *VR is know to only contain nonnegative values. */ 1604 1605static inline bool 1606value_range_nonnegative_p (value_range_t *vr) 1607{ 1608 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range 1609 which would return a useful value should be encoded as a 1610 VR_RANGE. */ 1611 if (vr->type == VR_RANGE) 1612 { 1613 int result = compare_values (vr->min, integer_zero_node); 1614 return (result == 0 || result == 1); 1615 } 1616 1617 return false; 1618} 1619 1620/* If *VR has a value rante that is a single constant value return that, 1621 otherwise return NULL_TREE. */ 1622 1623static tree 1624value_range_constant_singleton (value_range_t *vr) 1625{ 1626 if (vr->type == VR_RANGE 1627 && operand_equal_p (vr->min, vr->max, 0) 1628 && is_gimple_min_invariant (vr->min)) 1629 return vr->min; 1630 1631 return NULL_TREE; 1632} 1633 1634/* If OP has a value range with a single constant value return that, 1635 otherwise return NULL_TREE. This returns OP itself if OP is a 1636 constant. */ 1637 1638static tree 1639op_with_constant_singleton_value_range (tree op) 1640{ 1641 if (is_gimple_min_invariant (op)) 1642 return op; 1643 1644 if (TREE_CODE (op) != SSA_NAME) 1645 return NULL_TREE; 1646 1647 return value_range_constant_singleton (get_value_range (op)); 1648} 1649 1650/* Return true if op is in a boolean [0, 1] value-range. */ 1651 1652static bool 1653op_with_boolean_value_range_p (tree op) 1654{ 1655 value_range_t *vr; 1656 1657 if (TYPE_PRECISION (TREE_TYPE (op)) == 1) 1658 return true; 1659 1660 if (integer_zerop (op) 1661 || integer_onep (op)) 1662 return true; 1663 1664 if (TREE_CODE (op) != SSA_NAME) 1665 return false; 1666 1667 vr = get_value_range (op); 1668 return (vr->type == VR_RANGE 1669 && integer_zerop (vr->min) 1670 && integer_onep (vr->max)); 1671} 1672 1673/* Extract value range information from an ASSERT_EXPR EXPR and store 1674 it in *VR_P. */ 1675 1676static void 1677extract_range_from_assert (value_range_t *vr_p, tree expr) 1678{ 1679 tree var, cond, limit, min, max, type; 1680 value_range_t *limit_vr; 1681 enum tree_code cond_code; 1682 1683 var = ASSERT_EXPR_VAR (expr); 1684 cond = ASSERT_EXPR_COND (expr); 1685 1686 gcc_assert (COMPARISON_CLASS_P (cond)); 1687 1688 /* Find VAR in the ASSERT_EXPR conditional. */ 1689 if (var == TREE_OPERAND (cond, 0) 1690 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR 1691 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) 1692 { 1693 /* If the predicate is of the form VAR COMP LIMIT, then we just 1694 take LIMIT from the RHS and use the same comparison code. */ 1695 cond_code = TREE_CODE (cond); 1696 limit = TREE_OPERAND (cond, 1); 1697 cond = TREE_OPERAND (cond, 0); 1698 } 1699 else 1700 { 1701 /* If the predicate is of the form LIMIT COMP VAR, then we need 1702 to flip around the comparison code to create the proper range 1703 for VAR. */ 1704 cond_code = swap_tree_comparison (TREE_CODE (cond)); 1705 limit = TREE_OPERAND (cond, 0); 1706 cond = TREE_OPERAND (cond, 1); 1707 } 1708 1709 limit = avoid_overflow_infinity (limit); 1710 1711 type = TREE_TYPE (var); 1712 gcc_assert (limit != var); 1713 1714 /* For pointer arithmetic, we only keep track of pointer equality 1715 and inequality. */ 1716 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) 1717 { 1718 set_value_range_to_varying (vr_p); 1719 return; 1720 } 1721 1722 /* If LIMIT is another SSA name and LIMIT has a range of its own, 1723 try to use LIMIT's range to avoid creating symbolic ranges 1724 unnecessarily. */ 1725 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; 1726 1727 /* LIMIT's range is only interesting if it has any useful information. */ 1728 if (limit_vr 1729 && (limit_vr->type == VR_UNDEFINED 1730 || limit_vr->type == VR_VARYING 1731 || symbolic_range_p (limit_vr))) 1732 limit_vr = NULL; 1733 1734 /* Initially, the new range has the same set of equivalences of 1735 VAR's range. This will be revised before returning the final 1736 value. Since assertions may be chained via mutually exclusive 1737 predicates, we will need to trim the set of equivalences before 1738 we are done. */ 1739 gcc_assert (vr_p->equiv == NULL); 1740 add_equivalence (&vr_p->equiv, var); 1741 1742 /* Extract a new range based on the asserted comparison for VAR and 1743 LIMIT's value range. Notice that if LIMIT has an anti-range, we 1744 will only use it for equality comparisons (EQ_EXPR). For any 1745 other kind of assertion, we cannot derive a range from LIMIT's 1746 anti-range that can be used to describe the new range. For 1747 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], 1748 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is 1749 no single range for x_2 that could describe LE_EXPR, so we might 1750 as well build the range [b_4, +INF] for it. 1751 One special case we handle is extracting a range from a 1752 range test encoded as (unsigned)var + CST <= limit. */ 1753 if (TREE_CODE (cond) == NOP_EXPR 1754 || TREE_CODE (cond) == PLUS_EXPR) 1755 { 1756 if (TREE_CODE (cond) == PLUS_EXPR) 1757 { 1758 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)), 1759 TREE_OPERAND (cond, 1)); 1760 max = int_const_binop (PLUS_EXPR, limit, min); 1761 cond = TREE_OPERAND (cond, 0); 1762 } 1763 else 1764 { 1765 min = build_int_cst (TREE_TYPE (var), 0); 1766 max = limit; 1767 } 1768 1769 /* Make sure to not set TREE_OVERFLOW on the final type 1770 conversion. We are willingly interpreting large positive 1771 unsigned values as negative signed values here. */ 1772 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false); 1773 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false); 1774 1775 /* We can transform a max, min range to an anti-range or 1776 vice-versa. Use set_and_canonicalize_value_range which does 1777 this for us. */ 1778 if (cond_code == LE_EXPR) 1779 set_and_canonicalize_value_range (vr_p, VR_RANGE, 1780 min, max, vr_p->equiv); 1781 else if (cond_code == GT_EXPR) 1782 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, 1783 min, max, vr_p->equiv); 1784 else 1785 gcc_unreachable (); 1786 } 1787 else if (cond_code == EQ_EXPR) 1788 { 1789 enum value_range_type range_type; 1790 1791 if (limit_vr) 1792 { 1793 range_type = limit_vr->type; 1794 min = limit_vr->min; 1795 max = limit_vr->max; 1796 } 1797 else 1798 { 1799 range_type = VR_RANGE; 1800 min = limit; 1801 max = limit; 1802 } 1803 1804 set_value_range (vr_p, range_type, min, max, vr_p->equiv); 1805 1806 /* When asserting the equality VAR == LIMIT and LIMIT is another 1807 SSA name, the new range will also inherit the equivalence set 1808 from LIMIT. */ 1809 if (TREE_CODE (limit) == SSA_NAME) 1810 add_equivalence (&vr_p->equiv, limit); 1811 } 1812 else if (cond_code == NE_EXPR) 1813 { 1814 /* As described above, when LIMIT's range is an anti-range and 1815 this assertion is an inequality (NE_EXPR), then we cannot 1816 derive anything from the anti-range. For instance, if 1817 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does 1818 not imply that VAR's range is [0, 0]. So, in the case of 1819 anti-ranges, we just assert the inequality using LIMIT and 1820 not its anti-range. 1821 1822 If LIMIT_VR is a range, we can only use it to build a new 1823 anti-range if LIMIT_VR is a single-valued range. For 1824 instance, if LIMIT_VR is [0, 1], the predicate 1825 VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. 1826 Rather, it means that for value 0 VAR should be ~[0, 0] 1827 and for value 1, VAR should be ~[1, 1]. We cannot 1828 represent these ranges. 1829 1830 The only situation in which we can build a valid 1831 anti-range is when LIMIT_VR is a single-valued range 1832 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, 1833 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ 1834 if (limit_vr 1835 && limit_vr->type == VR_RANGE 1836 && compare_values (limit_vr->min, limit_vr->max) == 0) 1837 { 1838 min = limit_vr->min; 1839 max = limit_vr->max; 1840 } 1841 else 1842 { 1843 /* In any other case, we cannot use LIMIT's range to build a 1844 valid anti-range. */ 1845 min = max = limit; 1846 } 1847 1848 /* If MIN and MAX cover the whole range for their type, then 1849 just use the original LIMIT. */ 1850 if (INTEGRAL_TYPE_P (type) 1851 && vrp_val_is_min (min) 1852 && vrp_val_is_max (max)) 1853 min = max = limit; 1854 1855 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, 1856 min, max, vr_p->equiv); 1857 } 1858 else if (cond_code == LE_EXPR || cond_code == LT_EXPR) 1859 { 1860 min = TYPE_MIN_VALUE (type); 1861 1862 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1863 max = limit; 1864 else 1865 { 1866 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1867 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for 1868 LT_EXPR. */ 1869 max = limit_vr->max; 1870 } 1871 1872 /* If the maximum value forces us to be out of bounds, simply punt. 1873 It would be pointless to try and do anything more since this 1874 all should be optimized away above us. */ 1875 if ((cond_code == LT_EXPR 1876 && compare_values (max, min) == 0) 1877 || is_overflow_infinity (max)) 1878 set_value_range_to_varying (vr_p); 1879 else 1880 { 1881 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ 1882 if (cond_code == LT_EXPR) 1883 { 1884 if (TYPE_PRECISION (TREE_TYPE (max)) == 1 1885 && !TYPE_UNSIGNED (TREE_TYPE (max))) 1886 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max, 1887 build_int_cst (TREE_TYPE (max), -1)); 1888 else 1889 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, 1890 build_int_cst (TREE_TYPE (max), 1)); 1891 if (EXPR_P (max)) 1892 TREE_NO_WARNING (max) = 1; 1893 } 1894 1895 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1896 } 1897 } 1898 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) 1899 { 1900 max = TYPE_MAX_VALUE (type); 1901 1902 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1903 min = limit; 1904 else 1905 { 1906 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1907 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for 1908 GT_EXPR. */ 1909 min = limit_vr->min; 1910 } 1911 1912 /* If the minimum value forces us to be out of bounds, simply punt. 1913 It would be pointless to try and do anything more since this 1914 all should be optimized away above us. */ 1915 if ((cond_code == GT_EXPR 1916 && compare_values (min, max) == 0) 1917 || is_overflow_infinity (min)) 1918 set_value_range_to_varying (vr_p); 1919 else 1920 { 1921 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ 1922 if (cond_code == GT_EXPR) 1923 { 1924 if (TYPE_PRECISION (TREE_TYPE (min)) == 1 1925 && !TYPE_UNSIGNED (TREE_TYPE (min))) 1926 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min, 1927 build_int_cst (TREE_TYPE (min), -1)); 1928 else 1929 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, 1930 build_int_cst (TREE_TYPE (min), 1)); 1931 if (EXPR_P (min)) 1932 TREE_NO_WARNING (min) = 1; 1933 } 1934 1935 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1936 } 1937 } 1938 else 1939 gcc_unreachable (); 1940 1941 /* Finally intersect the new range with what we already know about var. */ 1942 vrp_intersect_ranges (vr_p, get_value_range (var)); 1943} 1944 1945 1946/* Extract range information from SSA name VAR and store it in VR. If 1947 VAR has an interesting range, use it. Otherwise, create the 1948 range [VAR, VAR] and return it. This is useful in situations where 1949 we may have conditionals testing values of VARYING names. For 1950 instance, 1951 1952 x_3 = y_5; 1953 if (x_3 > y_5) 1954 ... 1955 1956 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is 1957 always false. */ 1958 1959static void 1960extract_range_from_ssa_name (value_range_t *vr, tree var) 1961{ 1962 value_range_t *var_vr = get_value_range (var); 1963 1964 if (var_vr->type != VR_VARYING) 1965 copy_value_range (vr, var_vr); 1966 else 1967 set_value_range (vr, VR_RANGE, var, var, NULL); 1968 1969 add_equivalence (&vr->equiv, var); 1970} 1971 1972 1973/* Wrapper around int_const_binop. If the operation overflows and we 1974 are not using wrapping arithmetic, then adjust the result to be 1975 -INF or +INF depending on CODE, VAL1 and VAL2. This can return 1976 NULL_TREE if we need to use an overflow infinity representation but 1977 the type does not support it. */ 1978 1979static tree 1980vrp_int_const_binop (enum tree_code code, tree val1, tree val2) 1981{ 1982 tree res; 1983 1984 res = int_const_binop (code, val1, val2); 1985 1986 /* If we are using unsigned arithmetic, operate symbolically 1987 on -INF and +INF as int_const_binop only handles signed overflow. */ 1988 if (TYPE_UNSIGNED (TREE_TYPE (val1))) 1989 { 1990 int checkz = compare_values (res, val1); 1991 bool overflow = false; 1992 1993 /* Ensure that res = val1 [+*] val2 >= val1 1994 or that res = val1 - val2 <= val1. */ 1995 if ((code == PLUS_EXPR 1996 && !(checkz == 1 || checkz == 0)) 1997 || (code == MINUS_EXPR 1998 && !(checkz == 0 || checkz == -1))) 1999 { 2000 overflow = true; 2001 } 2002 /* Checking for multiplication overflow is done by dividing the 2003 output of the multiplication by the first input of the 2004 multiplication. If the result of that division operation is 2005 not equal to the second input of the multiplication, then the 2006 multiplication overflowed. */ 2007 else if (code == MULT_EXPR && !integer_zerop (val1)) 2008 { 2009 tree tmp = int_const_binop (TRUNC_DIV_EXPR, 2010 res, 2011 val1); 2012 int check = compare_values (tmp, val2); 2013 2014 if (check != 0) 2015 overflow = true; 2016 } 2017 2018 if (overflow) 2019 { 2020 res = copy_node (res); 2021 TREE_OVERFLOW (res) = 1; 2022 } 2023 2024 } 2025 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1))) 2026 /* If the singed operation wraps then int_const_binop has done 2027 everything we want. */ 2028 ; 2029 /* Signed division of -1/0 overflows and by the time it gets here 2030 returns NULL_TREE. */ 2031 else if (!res) 2032 return NULL_TREE; 2033 else if ((TREE_OVERFLOW (res) 2034 && !TREE_OVERFLOW (val1) 2035 && !TREE_OVERFLOW (val2)) 2036 || is_overflow_infinity (val1) 2037 || is_overflow_infinity (val2)) 2038 { 2039 /* If the operation overflowed but neither VAL1 nor VAL2 are 2040 overflown, return -INF or +INF depending on the operation 2041 and the combination of signs of the operands. */ 2042 int sgn1 = tree_int_cst_sgn (val1); 2043 int sgn2 = tree_int_cst_sgn (val2); 2044 2045 if (needs_overflow_infinity (TREE_TYPE (res)) 2046 && !supports_overflow_infinity (TREE_TYPE (res))) 2047 return NULL_TREE; 2048 2049 /* We have to punt on adding infinities of different signs, 2050 since we can't tell what the sign of the result should be. 2051 Likewise for subtracting infinities of the same sign. */ 2052 if (((code == PLUS_EXPR && sgn1 != sgn2) 2053 || (code == MINUS_EXPR && sgn1 == sgn2)) 2054 && is_overflow_infinity (val1) 2055 && is_overflow_infinity (val2)) 2056 return NULL_TREE; 2057 2058 /* Don't try to handle division or shifting of infinities. */ 2059 if ((code == TRUNC_DIV_EXPR 2060 || code == FLOOR_DIV_EXPR 2061 || code == CEIL_DIV_EXPR 2062 || code == EXACT_DIV_EXPR 2063 || code == ROUND_DIV_EXPR 2064 || code == RSHIFT_EXPR) 2065 && (is_overflow_infinity (val1) 2066 || is_overflow_infinity (val2))) 2067 return NULL_TREE; 2068 2069 /* Notice that we only need to handle the restricted set of 2070 operations handled by extract_range_from_binary_expr. 2071 Among them, only multiplication, addition and subtraction 2072 can yield overflow without overflown operands because we 2073 are working with integral types only... except in the 2074 case VAL1 = -INF and VAL2 = -1 which overflows to +INF 2075 for division too. */ 2076 2077 /* For multiplication, the sign of the overflow is given 2078 by the comparison of the signs of the operands. */ 2079 if ((code == MULT_EXPR && sgn1 == sgn2) 2080 /* For addition, the operands must be of the same sign 2081 to yield an overflow. Its sign is therefore that 2082 of one of the operands, for example the first. For 2083 infinite operands X + -INF is negative, not positive. */ 2084 || (code == PLUS_EXPR 2085 && (sgn1 >= 0 2086 ? !is_negative_overflow_infinity (val2) 2087 : is_positive_overflow_infinity (val2))) 2088 /* For subtraction, non-infinite operands must be of 2089 different signs to yield an overflow. Its sign is 2090 therefore that of the first operand or the opposite of 2091 that of the second operand. A first operand of 0 counts 2092 as positive here, for the corner case 0 - (-INF), which 2093 overflows, but must yield +INF. For infinite operands 0 2094 - INF is negative, not positive. */ 2095 || (code == MINUS_EXPR 2096 && (sgn1 >= 0 2097 ? !is_positive_overflow_infinity (val2) 2098 : is_negative_overflow_infinity (val2))) 2099 /* We only get in here with positive shift count, so the 2100 overflow direction is the same as the sign of val1. 2101 Actually rshift does not overflow at all, but we only 2102 handle the case of shifting overflowed -INF and +INF. */ 2103 || (code == RSHIFT_EXPR 2104 && sgn1 >= 0) 2105 /* For division, the only case is -INF / -1 = +INF. */ 2106 || code == TRUNC_DIV_EXPR 2107 || code == FLOOR_DIV_EXPR 2108 || code == CEIL_DIV_EXPR 2109 || code == EXACT_DIV_EXPR 2110 || code == ROUND_DIV_EXPR) 2111 return (needs_overflow_infinity (TREE_TYPE (res)) 2112 ? positive_overflow_infinity (TREE_TYPE (res)) 2113 : TYPE_MAX_VALUE (TREE_TYPE (res))); 2114 else 2115 return (needs_overflow_infinity (TREE_TYPE (res)) 2116 ? negative_overflow_infinity (TREE_TYPE (res)) 2117 : TYPE_MIN_VALUE (TREE_TYPE (res))); 2118 } 2119 2120 return res; 2121} 2122 2123 2124/* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO 2125 bitmask if some bit is unset, it means for all numbers in the range 2126 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO 2127 bitmask if some bit is set, it means for all numbers in the range 2128 the bit is 1, otherwise it might be 0 or 1. */ 2129 2130static bool 2131zero_nonzero_bits_from_vr (const tree expr_type, 2132 value_range_t *vr, 2133 wide_int *may_be_nonzero, 2134 wide_int *must_be_nonzero) 2135{ 2136 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); 2137 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); 2138 if (!range_int_cst_p (vr) 2139 || is_overflow_infinity (vr->min) 2140 || is_overflow_infinity (vr->max)) 2141 return false; 2142 2143 if (range_int_cst_singleton_p (vr)) 2144 { 2145 *may_be_nonzero = vr->min; 2146 *must_be_nonzero = *may_be_nonzero; 2147 } 2148 else if (tree_int_cst_sgn (vr->min) >= 0 2149 || tree_int_cst_sgn (vr->max) < 0) 2150 { 2151 wide_int xor_mask = wi::bit_xor (vr->min, vr->max); 2152 *may_be_nonzero = wi::bit_or (vr->min, vr->max); 2153 *must_be_nonzero = wi::bit_and (vr->min, vr->max); 2154 if (xor_mask != 0) 2155 { 2156 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, 2157 may_be_nonzero->get_precision ()); 2158 *may_be_nonzero = *may_be_nonzero | mask; 2159 *must_be_nonzero = must_be_nonzero->and_not (mask); 2160 } 2161 } 2162 2163 return true; 2164} 2165 2166/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR 2167 so that *VR0 U *VR1 == *AR. Returns true if that is possible, 2168 false otherwise. If *AR can be represented with a single range 2169 *VR1 will be VR_UNDEFINED. */ 2170 2171static bool 2172ranges_from_anti_range (value_range_t *ar, 2173 value_range_t *vr0, value_range_t *vr1) 2174{ 2175 tree type = TREE_TYPE (ar->min); 2176 2177 vr0->type = VR_UNDEFINED; 2178 vr1->type = VR_UNDEFINED; 2179 2180 if (ar->type != VR_ANTI_RANGE 2181 || TREE_CODE (ar->min) != INTEGER_CST 2182 || TREE_CODE (ar->max) != INTEGER_CST 2183 || !vrp_val_min (type) 2184 || !vrp_val_max (type)) 2185 return false; 2186 2187 if (!vrp_val_is_min (ar->min)) 2188 { 2189 vr0->type = VR_RANGE; 2190 vr0->min = vrp_val_min (type); 2191 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1)); 2192 } 2193 if (!vrp_val_is_max (ar->max)) 2194 { 2195 vr1->type = VR_RANGE; 2196 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1)); 2197 vr1->max = vrp_val_max (type); 2198 } 2199 if (vr0->type == VR_UNDEFINED) 2200 { 2201 *vr0 = *vr1; 2202 vr1->type = VR_UNDEFINED; 2203 } 2204 2205 return vr0->type != VR_UNDEFINED; 2206} 2207 2208/* Helper to extract a value-range *VR for a multiplicative operation 2209 *VR0 CODE *VR1. */ 2210 2211static void 2212extract_range_from_multiplicative_op_1 (value_range_t *vr, 2213 enum tree_code code, 2214 value_range_t *vr0, value_range_t *vr1) 2215{ 2216 enum value_range_type type; 2217 tree val[4]; 2218 size_t i; 2219 tree min, max; 2220 bool sop; 2221 int cmp; 2222 2223 /* Multiplications, divisions and shifts are a bit tricky to handle, 2224 depending on the mix of signs we have in the two ranges, we 2225 need to operate on different values to get the minimum and 2226 maximum values for the new range. One approach is to figure 2227 out all the variations of range combinations and do the 2228 operations. 2229 2230 However, this involves several calls to compare_values and it 2231 is pretty convoluted. It's simpler to do the 4 operations 2232 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP 2233 MAX1) and then figure the smallest and largest values to form 2234 the new range. */ 2235 gcc_assert (code == MULT_EXPR 2236 || code == TRUNC_DIV_EXPR 2237 || code == FLOOR_DIV_EXPR 2238 || code == CEIL_DIV_EXPR 2239 || code == EXACT_DIV_EXPR 2240 || code == ROUND_DIV_EXPR 2241 || code == RSHIFT_EXPR 2242 || code == LSHIFT_EXPR); 2243 gcc_assert ((vr0->type == VR_RANGE 2244 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE)) 2245 && vr0->type == vr1->type); 2246 2247 type = vr0->type; 2248 2249 /* Compute the 4 cross operations. */ 2250 sop = false; 2251 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min); 2252 if (val[0] == NULL_TREE) 2253 sop = true; 2254 2255 if (vr1->max == vr1->min) 2256 val[1] = NULL_TREE; 2257 else 2258 { 2259 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max); 2260 if (val[1] == NULL_TREE) 2261 sop = true; 2262 } 2263 2264 if (vr0->max == vr0->min) 2265 val[2] = NULL_TREE; 2266 else 2267 { 2268 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min); 2269 if (val[2] == NULL_TREE) 2270 sop = true; 2271 } 2272 2273 if (vr0->min == vr0->max || vr1->min == vr1->max) 2274 val[3] = NULL_TREE; 2275 else 2276 { 2277 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max); 2278 if (val[3] == NULL_TREE) 2279 sop = true; 2280 } 2281 2282 if (sop) 2283 { 2284 set_value_range_to_varying (vr); 2285 return; 2286 } 2287 2288 /* Set MIN to the minimum of VAL[i] and MAX to the maximum 2289 of VAL[i]. */ 2290 min = val[0]; 2291 max = val[0]; 2292 for (i = 1; i < 4; i++) 2293 { 2294 if (!is_gimple_min_invariant (min) 2295 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2296 || !is_gimple_min_invariant (max) 2297 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2298 break; 2299 2300 if (val[i]) 2301 { 2302 if (!is_gimple_min_invariant (val[i]) 2303 || (TREE_OVERFLOW (val[i]) 2304 && !is_overflow_infinity (val[i]))) 2305 { 2306 /* If we found an overflowed value, set MIN and MAX 2307 to it so that we set the resulting range to 2308 VARYING. */ 2309 min = max = val[i]; 2310 break; 2311 } 2312 2313 if (compare_values (val[i], min) == -1) 2314 min = val[i]; 2315 2316 if (compare_values (val[i], max) == 1) 2317 max = val[i]; 2318 } 2319 } 2320 2321 /* If either MIN or MAX overflowed, then set the resulting range to 2322 VARYING. But we do accept an overflow infinity 2323 representation. */ 2324 if (min == NULL_TREE 2325 || !is_gimple_min_invariant (min) 2326 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2327 || max == NULL_TREE 2328 || !is_gimple_min_invariant (max) 2329 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2330 { 2331 set_value_range_to_varying (vr); 2332 return; 2333 } 2334 2335 /* We punt if: 2336 1) [-INF, +INF] 2337 2) [-INF, +-INF(OVF)] 2338 3) [+-INF(OVF), +INF] 2339 4) [+-INF(OVF), +-INF(OVF)] 2340 We learn nothing when we have INF and INF(OVF) on both sides. 2341 Note that we do accept [-INF, -INF] and [+INF, +INF] without 2342 overflow. */ 2343 if ((vrp_val_is_min (min) || is_overflow_infinity (min)) 2344 && (vrp_val_is_max (max) || is_overflow_infinity (max))) 2345 { 2346 set_value_range_to_varying (vr); 2347 return; 2348 } 2349 2350 cmp = compare_values (min, max); 2351 if (cmp == -2 || cmp == 1) 2352 { 2353 /* If the new range has its limits swapped around (MIN > MAX), 2354 then the operation caused one of them to wrap around, mark 2355 the new range VARYING. */ 2356 set_value_range_to_varying (vr); 2357 } 2358 else 2359 set_value_range (vr, type, min, max, NULL); 2360} 2361 2362/* Extract range information from a binary operation CODE based on 2363 the ranges of each of its operands *VR0 and *VR1 with resulting 2364 type EXPR_TYPE. The resulting range is stored in *VR. */ 2365 2366static void 2367extract_range_from_binary_expr_1 (value_range_t *vr, 2368 enum tree_code code, tree expr_type, 2369 value_range_t *vr0_, value_range_t *vr1_) 2370{ 2371 value_range_t vr0 = *vr0_, vr1 = *vr1_; 2372 value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; 2373 enum value_range_type type; 2374 tree min = NULL_TREE, max = NULL_TREE; 2375 int cmp; 2376 2377 if (!INTEGRAL_TYPE_P (expr_type) 2378 && !POINTER_TYPE_P (expr_type)) 2379 { 2380 set_value_range_to_varying (vr); 2381 return; 2382 } 2383 2384 /* Not all binary expressions can be applied to ranges in a 2385 meaningful way. Handle only arithmetic operations. */ 2386 if (code != PLUS_EXPR 2387 && code != MINUS_EXPR 2388 && code != POINTER_PLUS_EXPR 2389 && code != MULT_EXPR 2390 && code != TRUNC_DIV_EXPR 2391 && code != FLOOR_DIV_EXPR 2392 && code != CEIL_DIV_EXPR 2393 && code != EXACT_DIV_EXPR 2394 && code != ROUND_DIV_EXPR 2395 && code != TRUNC_MOD_EXPR 2396 && code != RSHIFT_EXPR 2397 && code != LSHIFT_EXPR 2398 && code != MIN_EXPR 2399 && code != MAX_EXPR 2400 && code != BIT_AND_EXPR 2401 && code != BIT_IOR_EXPR 2402 && code != BIT_XOR_EXPR) 2403 { 2404 set_value_range_to_varying (vr); 2405 return; 2406 } 2407 2408 /* If both ranges are UNDEFINED, so is the result. */ 2409 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) 2410 { 2411 set_value_range_to_undefined (vr); 2412 return; 2413 } 2414 /* If one of the ranges is UNDEFINED drop it to VARYING for the following 2415 code. At some point we may want to special-case operations that 2416 have UNDEFINED result for all or some value-ranges of the not UNDEFINED 2417 operand. */ 2418 else if (vr0.type == VR_UNDEFINED) 2419 set_value_range_to_varying (&vr0); 2420 else if (vr1.type == VR_UNDEFINED) 2421 set_value_range_to_varying (&vr1); 2422 2423 /* Now canonicalize anti-ranges to ranges when they are not symbolic 2424 and express ~[] op X as ([]' op X) U ([]'' op X). */ 2425 if (vr0.type == VR_ANTI_RANGE 2426 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) 2427 { 2428 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); 2429 if (vrtem1.type != VR_UNDEFINED) 2430 { 2431 value_range_t vrres = VR_INITIALIZER; 2432 extract_range_from_binary_expr_1 (&vrres, code, expr_type, 2433 &vrtem1, vr1_); 2434 vrp_meet (vr, &vrres); 2435 } 2436 return; 2437 } 2438 /* Likewise for X op ~[]. */ 2439 if (vr1.type == VR_ANTI_RANGE 2440 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) 2441 { 2442 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); 2443 if (vrtem1.type != VR_UNDEFINED) 2444 { 2445 value_range_t vrres = VR_INITIALIZER; 2446 extract_range_from_binary_expr_1 (&vrres, code, expr_type, 2447 vr0_, &vrtem1); 2448 vrp_meet (vr, &vrres); 2449 } 2450 return; 2451 } 2452 2453 /* The type of the resulting value range defaults to VR0.TYPE. */ 2454 type = vr0.type; 2455 2456 /* Refuse to operate on VARYING ranges, ranges of different kinds 2457 and symbolic ranges. As an exception, we allow BIT_{AND,IOR} 2458 because we may be able to derive a useful range even if one of 2459 the operands is VR_VARYING or symbolic range. Similarly for 2460 divisions, MIN/MAX and PLUS/MINUS. 2461 2462 TODO, we may be able to derive anti-ranges in some cases. */ 2463 if (code != BIT_AND_EXPR 2464 && code != BIT_IOR_EXPR 2465 && code != TRUNC_DIV_EXPR 2466 && code != FLOOR_DIV_EXPR 2467 && code != CEIL_DIV_EXPR 2468 && code != EXACT_DIV_EXPR 2469 && code != ROUND_DIV_EXPR 2470 && code != TRUNC_MOD_EXPR 2471 && code != MIN_EXPR 2472 && code != MAX_EXPR 2473 && code != PLUS_EXPR 2474 && code != MINUS_EXPR 2475 && code != RSHIFT_EXPR 2476 && (vr0.type == VR_VARYING 2477 || vr1.type == VR_VARYING 2478 || vr0.type != vr1.type 2479 || symbolic_range_p (&vr0) 2480 || symbolic_range_p (&vr1))) 2481 { 2482 set_value_range_to_varying (vr); 2483 return; 2484 } 2485 2486 /* Now evaluate the expression to determine the new range. */ 2487 if (POINTER_TYPE_P (expr_type)) 2488 { 2489 if (code == MIN_EXPR || code == MAX_EXPR) 2490 { 2491 /* For MIN/MAX expressions with pointers, we only care about 2492 nullness, if both are non null, then the result is nonnull. 2493 If both are null, then the result is null. Otherwise they 2494 are varying. */ 2495 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) 2496 set_value_range_to_nonnull (vr, expr_type); 2497 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2498 set_value_range_to_null (vr, expr_type); 2499 else 2500 set_value_range_to_varying (vr); 2501 } 2502 else if (code == POINTER_PLUS_EXPR) 2503 { 2504 /* For pointer types, we are really only interested in asserting 2505 whether the expression evaluates to non-NULL. */ 2506 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) 2507 set_value_range_to_nonnull (vr, expr_type); 2508 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2509 set_value_range_to_null (vr, expr_type); 2510 else 2511 set_value_range_to_varying (vr); 2512 } 2513 else if (code == BIT_AND_EXPR) 2514 { 2515 /* For pointer types, we are really only interested in asserting 2516 whether the expression evaluates to non-NULL. */ 2517 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) 2518 set_value_range_to_nonnull (vr, expr_type); 2519 else if (range_is_null (&vr0) || range_is_null (&vr1)) 2520 set_value_range_to_null (vr, expr_type); 2521 else 2522 set_value_range_to_varying (vr); 2523 } 2524 else 2525 set_value_range_to_varying (vr); 2526 2527 return; 2528 } 2529 2530 /* For integer ranges, apply the operation to each end of the 2531 range and see what we end up with. */ 2532 if (code == PLUS_EXPR || code == MINUS_EXPR) 2533 { 2534 const bool minus_p = (code == MINUS_EXPR); 2535 tree min_op0 = vr0.min; 2536 tree min_op1 = minus_p ? vr1.max : vr1.min; 2537 tree max_op0 = vr0.max; 2538 tree max_op1 = minus_p ? vr1.min : vr1.max; 2539 tree sym_min_op0 = NULL_TREE; 2540 tree sym_min_op1 = NULL_TREE; 2541 tree sym_max_op0 = NULL_TREE; 2542 tree sym_max_op1 = NULL_TREE; 2543 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; 2544 2545 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or 2546 single-symbolic ranges, try to compute the precise resulting range, 2547 but only if we know that this resulting range will also be constant 2548 or single-symbolic. */ 2549 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE 2550 && (TREE_CODE (min_op0) == INTEGER_CST 2551 || (sym_min_op0 2552 = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) 2553 && (TREE_CODE (min_op1) == INTEGER_CST 2554 || (sym_min_op1 2555 = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) 2556 && (!(sym_min_op0 && sym_min_op1) 2557 || (sym_min_op0 == sym_min_op1 2558 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) 2559 && (TREE_CODE (max_op0) == INTEGER_CST 2560 || (sym_max_op0 2561 = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) 2562 && (TREE_CODE (max_op1) == INTEGER_CST 2563 || (sym_max_op1 2564 = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) 2565 && (!(sym_max_op0 && sym_max_op1) 2566 || (sym_max_op0 == sym_max_op1 2567 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) 2568 { 2569 const signop sgn = TYPE_SIGN (expr_type); 2570 const unsigned int prec = TYPE_PRECISION (expr_type); 2571 wide_int type_min, type_max, wmin, wmax; 2572 int min_ovf = 0; 2573 int max_ovf = 0; 2574 2575 /* Get the lower and upper bounds of the type. */ 2576 if (TYPE_OVERFLOW_WRAPS (expr_type)) 2577 { 2578 type_min = wi::min_value (prec, sgn); 2579 type_max = wi::max_value (prec, sgn); 2580 } 2581 else 2582 { 2583 type_min = vrp_val_min (expr_type); 2584 type_max = vrp_val_max (expr_type); 2585 } 2586 2587 /* Combine the lower bounds, if any. */ 2588 if (min_op0 && min_op1) 2589 { 2590 if (minus_p) 2591 { 2592 wmin = wi::sub (min_op0, min_op1); 2593 2594 /* Check for overflow. */ 2595 if (wi::cmp (0, min_op1, sgn) 2596 != wi::cmp (wmin, min_op0, sgn)) 2597 min_ovf = wi::cmp (min_op0, min_op1, sgn); 2598 } 2599 else 2600 { 2601 wmin = wi::add (min_op0, min_op1); 2602 2603 /* Check for overflow. */ 2604 if (wi::cmp (min_op1, 0, sgn) 2605 != wi::cmp (wmin, min_op0, sgn)) 2606 min_ovf = wi::cmp (min_op0, wmin, sgn); 2607 } 2608 } 2609 else if (min_op0) 2610 wmin = min_op0; 2611 else if (min_op1) 2612 wmin = minus_p ? wi::neg (min_op1) : min_op1; 2613 else 2614 wmin = wi::shwi (0, prec); 2615 2616 /* Combine the upper bounds, if any. */ 2617 if (max_op0 && max_op1) 2618 { 2619 if (minus_p) 2620 { 2621 wmax = wi::sub (max_op0, max_op1); 2622 2623 /* Check for overflow. */ 2624 if (wi::cmp (0, max_op1, sgn) 2625 != wi::cmp (wmax, max_op0, sgn)) 2626 max_ovf = wi::cmp (max_op0, max_op1, sgn); 2627 } 2628 else 2629 { 2630 wmax = wi::add (max_op0, max_op1); 2631 2632 if (wi::cmp (max_op1, 0, sgn) 2633 != wi::cmp (wmax, max_op0, sgn)) 2634 max_ovf = wi::cmp (max_op0, wmax, sgn); 2635 } 2636 } 2637 else if (max_op0) 2638 wmax = max_op0; 2639 else if (max_op1) 2640 wmax = minus_p ? wi::neg (max_op1) : max_op1; 2641 else 2642 wmax = wi::shwi (0, prec); 2643 2644 /* Check for type overflow. */ 2645 if (min_ovf == 0) 2646 { 2647 if (wi::cmp (wmin, type_min, sgn) == -1) 2648 min_ovf = -1; 2649 else if (wi::cmp (wmin, type_max, sgn) == 1) 2650 min_ovf = 1; 2651 } 2652 if (max_ovf == 0) 2653 { 2654 if (wi::cmp (wmax, type_min, sgn) == -1) 2655 max_ovf = -1; 2656 else if (wi::cmp (wmax, type_max, sgn) == 1) 2657 max_ovf = 1; 2658 } 2659 2660 /* If we have overflow for the constant part and the resulting 2661 range will be symbolic, drop to VR_VARYING. */ 2662 if ((min_ovf && sym_min_op0 != sym_min_op1) 2663 || (max_ovf && sym_max_op0 != sym_max_op1)) 2664 { 2665 set_value_range_to_varying (vr); 2666 return; 2667 } 2668 2669 if (TYPE_OVERFLOW_WRAPS (expr_type)) 2670 { 2671 /* If overflow wraps, truncate the values and adjust the 2672 range kind and bounds appropriately. */ 2673 wide_int tmin = wide_int::from (wmin, prec, sgn); 2674 wide_int tmax = wide_int::from (wmax, prec, sgn); 2675 if (min_ovf == max_ovf) 2676 { 2677 /* No overflow or both overflow or underflow. The 2678 range kind stays VR_RANGE. */ 2679 min = wide_int_to_tree (expr_type, tmin); 2680 max = wide_int_to_tree (expr_type, tmax); 2681 } 2682 else if (min_ovf == -1 && max_ovf == 1) 2683 { 2684 /* Underflow and overflow, drop to VR_VARYING. */ 2685 set_value_range_to_varying (vr); 2686 return; 2687 } 2688 else 2689 { 2690 /* Min underflow or max overflow. The range kind 2691 changes to VR_ANTI_RANGE. */ 2692 bool covers = false; 2693 wide_int tem = tmin; 2694 gcc_assert ((min_ovf == -1 && max_ovf == 0) 2695 || (max_ovf == 1 && min_ovf == 0)); 2696 type = VR_ANTI_RANGE; 2697 tmin = tmax + 1; 2698 if (wi::cmp (tmin, tmax, sgn) < 0) 2699 covers = true; 2700 tmax = tem - 1; 2701 if (wi::cmp (tmax, tem, sgn) > 0) 2702 covers = true; 2703 /* If the anti-range would cover nothing, drop to varying. 2704 Likewise if the anti-range bounds are outside of the 2705 types values. */ 2706 if (covers || wi::cmp (tmin, tmax, sgn) > 0) 2707 { 2708 set_value_range_to_varying (vr); 2709 return; 2710 } 2711 min = wide_int_to_tree (expr_type, tmin); 2712 max = wide_int_to_tree (expr_type, tmax); 2713 } 2714 } 2715 else 2716 { 2717 /* If overflow does not wrap, saturate to the types min/max 2718 value. */ 2719 if (min_ovf == -1) 2720 { 2721 if (needs_overflow_infinity (expr_type) 2722 && supports_overflow_infinity (expr_type)) 2723 min = negative_overflow_infinity (expr_type); 2724 else 2725 min = wide_int_to_tree (expr_type, type_min); 2726 } 2727 else if (min_ovf == 1) 2728 { 2729 if (needs_overflow_infinity (expr_type) 2730 && supports_overflow_infinity (expr_type)) 2731 min = positive_overflow_infinity (expr_type); 2732 else 2733 min = wide_int_to_tree (expr_type, type_max); 2734 } 2735 else 2736 min = wide_int_to_tree (expr_type, wmin); 2737 2738 if (max_ovf == -1) 2739 { 2740 if (needs_overflow_infinity (expr_type) 2741 && supports_overflow_infinity (expr_type)) 2742 max = negative_overflow_infinity (expr_type); 2743 else 2744 max = wide_int_to_tree (expr_type, type_min); 2745 } 2746 else if (max_ovf == 1) 2747 { 2748 if (needs_overflow_infinity (expr_type) 2749 && supports_overflow_infinity (expr_type)) 2750 max = positive_overflow_infinity (expr_type); 2751 else 2752 max = wide_int_to_tree (expr_type, type_max); 2753 } 2754 else 2755 max = wide_int_to_tree (expr_type, wmax); 2756 } 2757 2758 if (needs_overflow_infinity (expr_type) 2759 && supports_overflow_infinity (expr_type)) 2760 { 2761 if ((min_op0 && is_negative_overflow_infinity (min_op0)) 2762 || (min_op1 2763 && (minus_p 2764 ? is_positive_overflow_infinity (min_op1) 2765 : is_negative_overflow_infinity (min_op1)))) 2766 min = negative_overflow_infinity (expr_type); 2767 if ((max_op0 && is_positive_overflow_infinity (max_op0)) 2768 || (max_op1 2769 && (minus_p 2770 ? is_negative_overflow_infinity (max_op1) 2771 : is_positive_overflow_infinity (max_op1)))) 2772 max = positive_overflow_infinity (expr_type); 2773 } 2774 2775 /* If the result lower bound is constant, we're done; 2776 otherwise, build the symbolic lower bound. */ 2777 if (sym_min_op0 == sym_min_op1) 2778 ; 2779 else if (sym_min_op0) 2780 min = build_symbolic_expr (expr_type, sym_min_op0, 2781 neg_min_op0, min); 2782 else if (sym_min_op1) 2783 min = build_symbolic_expr (expr_type, sym_min_op1, 2784 neg_min_op1 ^ minus_p, min); 2785 2786 /* Likewise for the upper bound. */ 2787 if (sym_max_op0 == sym_max_op1) 2788 ; 2789 else if (sym_max_op0) 2790 max = build_symbolic_expr (expr_type, sym_max_op0, 2791 neg_max_op0, max); 2792 else if (sym_max_op1) 2793 max = build_symbolic_expr (expr_type, sym_max_op1, 2794 neg_max_op1 ^ minus_p, max); 2795 } 2796 else 2797 { 2798 /* For other cases, for example if we have a PLUS_EXPR with two 2799 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort 2800 to compute a precise range for such a case. 2801 ??? General even mixed range kind operations can be expressed 2802 by for example transforming ~[3, 5] + [1, 2] to range-only 2803 operations and a union primitive: 2804 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2] 2805 [-INF+1, 4] U [6, +INF(OVF)] 2806 though usually the union is not exactly representable with 2807 a single range or anti-range as the above is 2808 [-INF+1, +INF(OVF)] intersected with ~[5, 5] 2809 but one could use a scheme similar to equivalences for this. */ 2810 set_value_range_to_varying (vr); 2811 return; 2812 } 2813 } 2814 else if (code == MIN_EXPR 2815 || code == MAX_EXPR) 2816 { 2817 if (vr0.type == VR_RANGE 2818 && !symbolic_range_p (&vr0)) 2819 { 2820 type = VR_RANGE; 2821 if (vr1.type == VR_RANGE 2822 && !symbolic_range_p (&vr1)) 2823 { 2824 /* For operations that make the resulting range directly 2825 proportional to the original ranges, apply the operation to 2826 the same end of each range. */ 2827 min = vrp_int_const_binop (code, vr0.min, vr1.min); 2828 max = vrp_int_const_binop (code, vr0.max, vr1.max); 2829 } 2830 else if (code == MIN_EXPR) 2831 { 2832 min = vrp_val_min (expr_type); 2833 max = vr0.max; 2834 } 2835 else if (code == MAX_EXPR) 2836 { 2837 min = vr0.min; 2838 max = vrp_val_max (expr_type); 2839 } 2840 } 2841 else if (vr1.type == VR_RANGE 2842 && !symbolic_range_p (&vr1)) 2843 { 2844 type = VR_RANGE; 2845 if (code == MIN_EXPR) 2846 { 2847 min = vrp_val_min (expr_type); 2848 max = vr1.max; 2849 } 2850 else if (code == MAX_EXPR) 2851 { 2852 min = vr1.min; 2853 max = vrp_val_max (expr_type); 2854 } 2855 } 2856 else 2857 { 2858 set_value_range_to_varying (vr); 2859 return; 2860 } 2861 } 2862 else if (code == MULT_EXPR) 2863 { 2864 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not 2865 drop to varying. This test requires 2*prec bits if both 2866 operands are signed and 2*prec + 2 bits if either is not. */ 2867 2868 signop sign = TYPE_SIGN (expr_type); 2869 unsigned int prec = TYPE_PRECISION (expr_type); 2870 2871 if (range_int_cst_p (&vr0) 2872 && range_int_cst_p (&vr1) 2873 && TYPE_OVERFLOW_WRAPS (expr_type)) 2874 { 2875 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; 2876 typedef generic_wide_int 2877 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; 2878 vrp_int sizem1 = wi::mask <vrp_int> (prec, false); 2879 vrp_int size = sizem1 + 1; 2880 2881 /* Extend the values using the sign of the result to PREC2. 2882 From here on out, everthing is just signed math no matter 2883 what the input types were. */ 2884 vrp_int min0 = vrp_int_cst (vr0.min); 2885 vrp_int max0 = vrp_int_cst (vr0.max); 2886 vrp_int min1 = vrp_int_cst (vr1.min); 2887 vrp_int max1 = vrp_int_cst (vr1.max); 2888 /* Canonicalize the intervals. */ 2889 if (sign == UNSIGNED) 2890 { 2891 if (wi::ltu_p (size, min0 + max0)) 2892 { 2893 min0 -= size; 2894 max0 -= size; 2895 } 2896 2897 if (wi::ltu_p (size, min1 + max1)) 2898 { 2899 min1 -= size; 2900 max1 -= size; 2901 } 2902 } 2903 2904 vrp_int prod0 = min0 * min1; 2905 vrp_int prod1 = min0 * max1; 2906 vrp_int prod2 = max0 * min1; 2907 vrp_int prod3 = max0 * max1; 2908 2909 /* Sort the 4 products so that min is in prod0 and max is in 2910 prod3. */ 2911 /* min0min1 > max0max1 */ 2912 if (wi::gts_p (prod0, prod3)) 2913 { 2914 vrp_int tmp = prod3; 2915 prod3 = prod0; 2916 prod0 = tmp; 2917 } 2918 2919 /* min0max1 > max0min1 */ 2920 if (wi::gts_p (prod1, prod2)) 2921 { 2922 vrp_int tmp = prod2; 2923 prod2 = prod1; 2924 prod1 = tmp; 2925 } 2926 2927 if (wi::gts_p (prod0, prod1)) 2928 { 2929 vrp_int tmp = prod1; 2930 prod1 = prod0; 2931 prod0 = tmp; 2932 } 2933 2934 if (wi::gts_p (prod2, prod3)) 2935 { 2936 vrp_int tmp = prod3; 2937 prod3 = prod2; 2938 prod2 = tmp; 2939 } 2940 2941 /* diff = max - min. */ 2942 prod2 = prod3 - prod0; 2943 if (wi::geu_p (prod2, sizem1)) 2944 { 2945 /* the range covers all values. */ 2946 set_value_range_to_varying (vr); 2947 return; 2948 } 2949 2950 /* The following should handle the wrapping and selecting 2951 VR_ANTI_RANGE for us. */ 2952 min = wide_int_to_tree (expr_type, prod0); 2953 max = wide_int_to_tree (expr_type, prod3); 2954 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); 2955 return; 2956 } 2957 2958 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, 2959 drop to VR_VARYING. It would take more effort to compute a 2960 precise range for such a case. For example, if we have 2961 op0 == 65536 and op1 == 65536 with their ranges both being 2962 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so 2963 we cannot claim that the product is in ~[0,0]. Note that we 2964 are guaranteed to have vr0.type == vr1.type at this 2965 point. */ 2966 if (vr0.type == VR_ANTI_RANGE 2967 && !TYPE_OVERFLOW_UNDEFINED (expr_type)) 2968 { 2969 set_value_range_to_varying (vr); 2970 return; 2971 } 2972 2973 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 2974 return; 2975 } 2976 else if (code == RSHIFT_EXPR 2977 || code == LSHIFT_EXPR) 2978 { 2979 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], 2980 then drop to VR_VARYING. Outside of this range we get undefined 2981 behavior from the shift operation. We cannot even trust 2982 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl 2983 shifts, and the operation at the tree level may be widened. */ 2984 if (range_int_cst_p (&vr1) 2985 && compare_tree_int (vr1.min, 0) >= 0 2986 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) 2987 { 2988 if (code == RSHIFT_EXPR) 2989 { 2990 /* Even if vr0 is VARYING or otherwise not usable, we can derive 2991 useful ranges just from the shift count. E.g. 2992 x >> 63 for signed 64-bit x is always [-1, 0]. */ 2993 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) 2994 { 2995 vr0.type = type = VR_RANGE; 2996 vr0.min = vrp_val_min (expr_type); 2997 vr0.max = vrp_val_max (expr_type); 2998 } 2999 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 3000 return; 3001 } 3002 /* We can map lshifts by constants to MULT_EXPR handling. */ 3003 else if (code == LSHIFT_EXPR 3004 && range_int_cst_singleton_p (&vr1)) 3005 { 3006 bool saved_flag_wrapv; 3007 value_range_t vr1p = VR_INITIALIZER; 3008 vr1p.type = VR_RANGE; 3009 vr1p.min = (wide_int_to_tree 3010 (expr_type, 3011 wi::set_bit_in_zero (tree_to_shwi (vr1.min), 3012 TYPE_PRECISION (expr_type)))); 3013 vr1p.max = vr1p.min; 3014 /* We have to use a wrapping multiply though as signed overflow 3015 on lshifts is implementation defined in C89. */ 3016 saved_flag_wrapv = flag_wrapv; 3017 flag_wrapv = 1; 3018 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, 3019 &vr0, &vr1p); 3020 flag_wrapv = saved_flag_wrapv; 3021 return; 3022 } 3023 else if (code == LSHIFT_EXPR 3024 && range_int_cst_p (&vr0)) 3025 { 3026 int prec = TYPE_PRECISION (expr_type); 3027 int overflow_pos = prec; 3028 int bound_shift; 3029 wide_int low_bound, high_bound; 3030 bool uns = TYPE_UNSIGNED (expr_type); 3031 bool in_bounds = false; 3032 3033 if (!uns) 3034 overflow_pos -= 1; 3035 3036 bound_shift = overflow_pos - tree_to_shwi (vr1.max); 3037 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can 3038 overflow. However, for that to happen, vr1.max needs to be 3039 zero, which means vr1 is a singleton range of zero, which 3040 means it should be handled by the previous LSHIFT_EXPR 3041 if-clause. */ 3042 wide_int bound = wi::set_bit_in_zero (bound_shift, prec); 3043 wide_int complement = ~(bound - 1); 3044 3045 if (uns) 3046 { 3047 low_bound = bound; 3048 high_bound = complement; 3049 if (wi::ltu_p (vr0.max, low_bound)) 3050 { 3051 /* [5, 6] << [1, 2] == [10, 24]. */ 3052 /* We're shifting out only zeroes, the value increases 3053 monotonically. */ 3054 in_bounds = true; 3055 } 3056 else if (wi::ltu_p (high_bound, vr0.min)) 3057 { 3058 /* [0xffffff00, 0xffffffff] << [1, 2] 3059 == [0xfffffc00, 0xfffffffe]. */ 3060 /* We're shifting out only ones, the value decreases 3061 monotonically. */ 3062 in_bounds = true; 3063 } 3064 } 3065 else 3066 { 3067 /* [-1, 1] << [1, 2] == [-4, 4]. */ 3068 low_bound = complement; 3069 high_bound = bound; 3070 if (wi::lts_p (vr0.max, high_bound) 3071 && wi::lts_p (low_bound, vr0.min)) 3072 { 3073 /* For non-negative numbers, we're shifting out only 3074 zeroes, the value increases monotonically. 3075 For negative numbers, we're shifting out only ones, the 3076 value decreases monotomically. */ 3077 in_bounds = true; 3078 } 3079 } 3080 3081 if (in_bounds) 3082 { 3083 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 3084 return; 3085 } 3086 } 3087 } 3088 set_value_range_to_varying (vr); 3089 return; 3090 } 3091 else if (code == TRUNC_DIV_EXPR 3092 || code == FLOOR_DIV_EXPR 3093 || code == CEIL_DIV_EXPR 3094 || code == EXACT_DIV_EXPR 3095 || code == ROUND_DIV_EXPR) 3096 { 3097 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) 3098 { 3099 /* For division, if op1 has VR_RANGE but op0 does not, something 3100 can be deduced just from that range. Say [min, max] / [4, max] 3101 gives [min / 4, max / 4] range. */ 3102 if (vr1.type == VR_RANGE 3103 && !symbolic_range_p (&vr1) 3104 && range_includes_zero_p (vr1.min, vr1.max) == 0) 3105 { 3106 vr0.type = type = VR_RANGE; 3107 vr0.min = vrp_val_min (expr_type); 3108 vr0.max = vrp_val_max (expr_type); 3109 } 3110 else 3111 { 3112 set_value_range_to_varying (vr); 3113 return; 3114 } 3115 } 3116 3117 /* For divisions, if flag_non_call_exceptions is true, we must 3118 not eliminate a division by zero. */ 3119 if (cfun->can_throw_non_call_exceptions 3120 && (vr1.type != VR_RANGE 3121 || range_includes_zero_p (vr1.min, vr1.max) != 0)) 3122 { 3123 set_value_range_to_varying (vr); 3124 return; 3125 } 3126 3127 /* For divisions, if op0 is VR_RANGE, we can deduce a range 3128 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can 3129 include 0. */ 3130 if (vr0.type == VR_RANGE 3131 && (vr1.type != VR_RANGE 3132 || range_includes_zero_p (vr1.min, vr1.max) != 0)) 3133 { 3134 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); 3135 int cmp; 3136 3137 min = NULL_TREE; 3138 max = NULL_TREE; 3139 if (TYPE_UNSIGNED (expr_type) 3140 || value_range_nonnegative_p (&vr1)) 3141 { 3142 /* For unsigned division or when divisor is known 3143 to be non-negative, the range has to cover 3144 all numbers from 0 to max for positive max 3145 and all numbers from min to 0 for negative min. */ 3146 cmp = compare_values (vr0.max, zero); 3147 if (cmp == -1) 3148 max = zero; 3149 else if (cmp == 0 || cmp == 1) 3150 max = vr0.max; 3151 else 3152 type = VR_VARYING; 3153 cmp = compare_values (vr0.min, zero); 3154 if (cmp == 1) 3155 min = zero; 3156 else if (cmp == 0 || cmp == -1) 3157 min = vr0.min; 3158 else 3159 type = VR_VARYING; 3160 } 3161 else 3162 { 3163 /* Otherwise the range is -max .. max or min .. -min 3164 depending on which bound is bigger in absolute value, 3165 as the division can change the sign. */ 3166 abs_extent_range (vr, vr0.min, vr0.max); 3167 return; 3168 } 3169 if (type == VR_VARYING) 3170 { 3171 set_value_range_to_varying (vr); 3172 return; 3173 } 3174 } 3175 else 3176 { 3177 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 3178 return; 3179 } 3180 } 3181 else if (code == TRUNC_MOD_EXPR) 3182 { 3183 if (vr1.type != VR_RANGE 3184 || range_includes_zero_p (vr1.min, vr1.max) != 0 3185 || vrp_val_is_min (vr1.min)) 3186 { 3187 set_value_range_to_varying (vr); 3188 return; 3189 } 3190 type = VR_RANGE; 3191 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */ 3192 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); 3193 if (tree_int_cst_lt (max, vr1.max)) 3194 max = vr1.max; 3195 max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1)); 3196 /* If the dividend is non-negative the modulus will be 3197 non-negative as well. */ 3198 if (TYPE_UNSIGNED (expr_type) 3199 || value_range_nonnegative_p (&vr0)) 3200 min = build_int_cst (TREE_TYPE (max), 0); 3201 else 3202 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max); 3203 } 3204 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) 3205 { 3206 bool int_cst_range0, int_cst_range1; 3207 wide_int may_be_nonzero0, may_be_nonzero1; 3208 wide_int must_be_nonzero0, must_be_nonzero1; 3209 3210 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, 3211 &may_be_nonzero0, 3212 &must_be_nonzero0); 3213 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, 3214 &may_be_nonzero1, 3215 &must_be_nonzero1); 3216 3217 type = VR_RANGE; 3218 if (code == BIT_AND_EXPR) 3219 { 3220 min = wide_int_to_tree (expr_type, 3221 must_be_nonzero0 & must_be_nonzero1); 3222 wide_int wmax = may_be_nonzero0 & may_be_nonzero1; 3223 /* If both input ranges contain only negative values we can 3224 truncate the result range maximum to the minimum of the 3225 input range maxima. */ 3226 if (int_cst_range0 && int_cst_range1 3227 && tree_int_cst_sgn (vr0.max) < 0 3228 && tree_int_cst_sgn (vr1.max) < 0) 3229 { 3230 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); 3231 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); 3232 } 3233 /* If either input range contains only non-negative values 3234 we can truncate the result range maximum to the respective 3235 maximum of the input range. */ 3236 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) 3237 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); 3238 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) 3239 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); 3240 max = wide_int_to_tree (expr_type, wmax); 3241 } 3242 else if (code == BIT_IOR_EXPR) 3243 { 3244 max = wide_int_to_tree (expr_type, 3245 may_be_nonzero0 | may_be_nonzero1); 3246 wide_int wmin = must_be_nonzero0 | must_be_nonzero1; 3247 /* If the input ranges contain only positive values we can 3248 truncate the minimum of the result range to the maximum 3249 of the input range minima. */ 3250 if (int_cst_range0 && int_cst_range1 3251 && tree_int_cst_sgn (vr0.min) >= 0 3252 && tree_int_cst_sgn (vr1.min) >= 0) 3253 { 3254 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); 3255 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); 3256 } 3257 /* If either input range contains only negative values 3258 we can truncate the minimum of the result range to the 3259 respective minimum range. */ 3260 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) 3261 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); 3262 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) 3263 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); 3264 min = wide_int_to_tree (expr_type, wmin); 3265 } 3266 else if (code == BIT_XOR_EXPR) 3267 { 3268 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) 3269 | ~(may_be_nonzero0 | may_be_nonzero1)); 3270 wide_int result_one_bits 3271 = (must_be_nonzero0.and_not (may_be_nonzero1) 3272 | must_be_nonzero1.and_not (may_be_nonzero0)); 3273 max = wide_int_to_tree (expr_type, ~result_zero_bits); 3274 min = wide_int_to_tree (expr_type, result_one_bits); 3275 /* If the range has all positive or all negative values the 3276 result is better than VARYING. */ 3277 if (tree_int_cst_sgn (min) < 0 3278 || tree_int_cst_sgn (max) >= 0) 3279 ; 3280 else 3281 max = min = NULL_TREE; 3282 } 3283 } 3284 else 3285 gcc_unreachable (); 3286 3287 /* If either MIN or MAX overflowed, then set the resulting range to 3288 VARYING. But we do accept an overflow infinity representation. */ 3289 if (min == NULL_TREE 3290 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min)) 3291 || max == NULL_TREE 3292 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max))) 3293 { 3294 set_value_range_to_varying (vr); 3295 return; 3296 } 3297 3298 /* We punt if: 3299 1) [-INF, +INF] 3300 2) [-INF, +-INF(OVF)] 3301 3) [+-INF(OVF), +INF] 3302 4) [+-INF(OVF), +-INF(OVF)] 3303 We learn nothing when we have INF and INF(OVF) on both sides. 3304 Note that we do accept [-INF, -INF] and [+INF, +INF] without 3305 overflow. */ 3306 if ((vrp_val_is_min (min) || is_overflow_infinity (min)) 3307 && (vrp_val_is_max (max) || is_overflow_infinity (max))) 3308 { 3309 set_value_range_to_varying (vr); 3310 return; 3311 } 3312 3313 cmp = compare_values (min, max); 3314 if (cmp == -2 || cmp == 1) 3315 { 3316 /* If the new range has its limits swapped around (MIN > MAX), 3317 then the operation caused one of them to wrap around, mark 3318 the new range VARYING. */ 3319 set_value_range_to_varying (vr); 3320 } 3321 else 3322 set_value_range (vr, type, min, max, NULL); 3323} 3324 3325/* Extract range information from a binary expression OP0 CODE OP1 based on 3326 the ranges of each of its operands with resulting type EXPR_TYPE. 3327 The resulting range is stored in *VR. */ 3328 3329static void 3330extract_range_from_binary_expr (value_range_t *vr, 3331 enum tree_code code, 3332 tree expr_type, tree op0, tree op1) 3333{ 3334 value_range_t vr0 = VR_INITIALIZER; 3335 value_range_t vr1 = VR_INITIALIZER; 3336 3337 /* Get value ranges for each operand. For constant operands, create 3338 a new value range with the operand to simplify processing. */ 3339 if (TREE_CODE (op0) == SSA_NAME) 3340 vr0 = *(get_value_range (op0)); 3341 else if (is_gimple_min_invariant (op0)) 3342 set_value_range_to_value (&vr0, op0, NULL); 3343 else 3344 set_value_range_to_varying (&vr0); 3345 3346 if (TREE_CODE (op1) == SSA_NAME) 3347 vr1 = *(get_value_range (op1)); 3348 else if (is_gimple_min_invariant (op1)) 3349 set_value_range_to_value (&vr1, op1, NULL); 3350 else 3351 set_value_range_to_varying (&vr1); 3352 3353 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1); 3354 3355 /* Try harder for PLUS and MINUS if the range of one operand is symbolic 3356 and based on the other operand, for example if it was deduced from a 3357 symbolic comparison. When a bound of the range of the first operand 3358 is invariant, we set the corresponding bound of the new range to INF 3359 in order to avoid recursing on the range of the second operand. */ 3360 if (vr->type == VR_VARYING 3361 && (code == PLUS_EXPR || code == MINUS_EXPR) 3362 && TREE_CODE (op1) == SSA_NAME 3363 && vr0.type == VR_RANGE 3364 && symbolic_range_based_on_p (&vr0, op1)) 3365 { 3366 const bool minus_p = (code == MINUS_EXPR); 3367 value_range_t n_vr1 = VR_INITIALIZER; 3368 3369 /* Try with VR0 and [-INF, OP1]. */ 3370 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min)) 3371 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL); 3372 3373 /* Try with VR0 and [OP1, +INF]. */ 3374 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max)) 3375 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL); 3376 3377 /* Try with VR0 and [OP1, OP1]. */ 3378 else 3379 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL); 3380 3381 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1); 3382 } 3383 3384 if (vr->type == VR_VARYING 3385 && (code == PLUS_EXPR || code == MINUS_EXPR) 3386 && TREE_CODE (op0) == SSA_NAME 3387 && vr1.type == VR_RANGE 3388 && symbolic_range_based_on_p (&vr1, op0)) 3389 { 3390 const bool minus_p = (code == MINUS_EXPR); 3391 value_range_t n_vr0 = VR_INITIALIZER; 3392 3393 /* Try with [-INF, OP0] and VR1. */ 3394 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min)) 3395 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL); 3396 3397 /* Try with [OP0, +INF] and VR1. */ 3398 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max)) 3399 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL); 3400 3401 /* Try with [OP0, OP0] and VR1. */ 3402 else 3403 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL); 3404 3405 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1); 3406 } 3407} 3408 3409/* Extract range information from a unary operation CODE based on 3410 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE. 3411 The The resulting range is stored in *VR. */ 3412 3413static void 3414extract_range_from_unary_expr_1 (value_range_t *vr, 3415 enum tree_code code, tree type, 3416 value_range_t *vr0_, tree op0_type) 3417{ 3418 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; 3419 3420 /* VRP only operates on integral and pointer types. */ 3421 if (!(INTEGRAL_TYPE_P (op0_type) 3422 || POINTER_TYPE_P (op0_type)) 3423 || !(INTEGRAL_TYPE_P (type) 3424 || POINTER_TYPE_P (type))) 3425 { 3426 set_value_range_to_varying (vr); 3427 return; 3428 } 3429 3430 /* If VR0 is UNDEFINED, so is the result. */ 3431 if (vr0.type == VR_UNDEFINED) 3432 { 3433 set_value_range_to_undefined (vr); 3434 return; 3435 } 3436 3437 /* Handle operations that we express in terms of others. */ 3438 if (code == PAREN_EXPR || code == OBJ_TYPE_REF) 3439 { 3440 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */ 3441 copy_value_range (vr, &vr0); 3442 return; 3443 } 3444 else if (code == NEGATE_EXPR) 3445 { 3446 /* -X is simply 0 - X, so re-use existing code that also handles 3447 anti-ranges fine. */ 3448 value_range_t zero = VR_INITIALIZER; 3449 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); 3450 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); 3451 return; 3452 } 3453 else if (code == BIT_NOT_EXPR) 3454 { 3455 /* ~X is simply -1 - X, so re-use existing code that also handles 3456 anti-ranges fine. */ 3457 value_range_t minusone = VR_INITIALIZER; 3458 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); 3459 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, 3460 type, &minusone, &vr0); 3461 return; 3462 } 3463 3464 /* Now canonicalize anti-ranges to ranges when they are not symbolic 3465 and express op ~[] as (op []') U (op []''). */ 3466 if (vr0.type == VR_ANTI_RANGE 3467 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) 3468 { 3469 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type); 3470 if (vrtem1.type != VR_UNDEFINED) 3471 { 3472 value_range_t vrres = VR_INITIALIZER; 3473 extract_range_from_unary_expr_1 (&vrres, code, type, 3474 &vrtem1, op0_type); 3475 vrp_meet (vr, &vrres); 3476 } 3477 return; 3478 } 3479 3480 if (CONVERT_EXPR_CODE_P (code)) 3481 { 3482 tree inner_type = op0_type; 3483 tree outer_type = type; 3484 3485 /* If the expression evaluates to a pointer, we are only interested in 3486 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ 3487 if (POINTER_TYPE_P (type)) 3488 { 3489 if (range_is_nonnull (&vr0)) 3490 set_value_range_to_nonnull (vr, type); 3491 else if (range_is_null (&vr0)) 3492 set_value_range_to_null (vr, type); 3493 else 3494 set_value_range_to_varying (vr); 3495 return; 3496 } 3497 3498 /* If VR0 is varying and we increase the type precision, assume 3499 a full range for the following transformation. */ 3500 if (vr0.type == VR_VARYING 3501 && INTEGRAL_TYPE_P (inner_type) 3502 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) 3503 { 3504 vr0.type = VR_RANGE; 3505 vr0.min = TYPE_MIN_VALUE (inner_type); 3506 vr0.max = TYPE_MAX_VALUE (inner_type); 3507 } 3508 3509 /* If VR0 is a constant range or anti-range and the conversion is 3510 not truncating we can convert the min and max values and 3511 canonicalize the resulting range. Otherwise we can do the 3512 conversion if the size of the range is less than what the 3513 precision of the target type can represent and the range is 3514 not an anti-range. */ 3515 if ((vr0.type == VR_RANGE 3516 || vr0.type == VR_ANTI_RANGE) 3517 && TREE_CODE (vr0.min) == INTEGER_CST 3518 && TREE_CODE (vr0.max) == INTEGER_CST 3519 && (!is_overflow_infinity (vr0.min) 3520 || (vr0.type == VR_RANGE 3521 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 3522 && needs_overflow_infinity (outer_type) 3523 && supports_overflow_infinity (outer_type))) 3524 && (!is_overflow_infinity (vr0.max) 3525 || (vr0.type == VR_RANGE 3526 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 3527 && needs_overflow_infinity (outer_type) 3528 && supports_overflow_infinity (outer_type))) 3529 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) 3530 || (vr0.type == VR_RANGE 3531 && integer_zerop (int_const_binop (RSHIFT_EXPR, 3532 int_const_binop (MINUS_EXPR, vr0.max, vr0.min), 3533 size_int (TYPE_PRECISION (outer_type))))))) 3534 { 3535 tree new_min, new_max; 3536 if (is_overflow_infinity (vr0.min)) 3537 new_min = negative_overflow_infinity (outer_type); 3538 else 3539 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min), 3540 0, false); 3541 if (is_overflow_infinity (vr0.max)) 3542 new_max = positive_overflow_infinity (outer_type); 3543 else 3544 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max), 3545 0, false); 3546 set_and_canonicalize_value_range (vr, vr0.type, 3547 new_min, new_max, NULL); 3548 return; 3549 } 3550 3551 set_value_range_to_varying (vr); 3552 return; 3553 } 3554 else if (code == ABS_EXPR) 3555 { 3556 tree min, max; 3557 int cmp; 3558 3559 /* Pass through vr0 in the easy cases. */ 3560 if (TYPE_UNSIGNED (type) 3561 || value_range_nonnegative_p (&vr0)) 3562 { 3563 copy_value_range (vr, &vr0); 3564 return; 3565 } 3566 3567 /* For the remaining varying or symbolic ranges we can't do anything 3568 useful. */ 3569 if (vr0.type == VR_VARYING 3570 || symbolic_range_p (&vr0)) 3571 { 3572 set_value_range_to_varying (vr); 3573 return; 3574 } 3575 3576 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a 3577 useful range. */ 3578 if (!TYPE_OVERFLOW_UNDEFINED (type) 3579 && ((vr0.type == VR_RANGE 3580 && vrp_val_is_min (vr0.min)) 3581 || (vr0.type == VR_ANTI_RANGE 3582 && !vrp_val_is_min (vr0.min)))) 3583 { 3584 set_value_range_to_varying (vr); 3585 return; 3586 } 3587 3588 /* ABS_EXPR may flip the range around, if the original range 3589 included negative values. */ 3590 if (is_overflow_infinity (vr0.min)) 3591 min = positive_overflow_infinity (type); 3592 else if (!vrp_val_is_min (vr0.min)) 3593 min = fold_unary_to_constant (code, type, vr0.min); 3594 else if (!needs_overflow_infinity (type)) 3595 min = TYPE_MAX_VALUE (type); 3596 else if (supports_overflow_infinity (type)) 3597 min = positive_overflow_infinity (type); 3598 else 3599 { 3600 set_value_range_to_varying (vr); 3601 return; 3602 } 3603 3604 if (is_overflow_infinity (vr0.max)) 3605 max = positive_overflow_infinity (type); 3606 else if (!vrp_val_is_min (vr0.max)) 3607 max = fold_unary_to_constant (code, type, vr0.max); 3608 else if (!needs_overflow_infinity (type)) 3609 max = TYPE_MAX_VALUE (type); 3610 else if (supports_overflow_infinity (type) 3611 /* We shouldn't generate [+INF, +INF] as set_value_range 3612 doesn't like this and ICEs. */ 3613 && !is_positive_overflow_infinity (min)) 3614 max = positive_overflow_infinity (type); 3615 else 3616 { 3617 set_value_range_to_varying (vr); 3618 return; 3619 } 3620 3621 cmp = compare_values (min, max); 3622 3623 /* If a VR_ANTI_RANGEs contains zero, then we have 3624 ~[-INF, min(MIN, MAX)]. */ 3625 if (vr0.type == VR_ANTI_RANGE) 3626 { 3627 if (range_includes_zero_p (vr0.min, vr0.max) == 1) 3628 { 3629 /* Take the lower of the two values. */ 3630 if (cmp != 1) 3631 max = min; 3632 3633 /* Create ~[-INF, min (abs(MIN), abs(MAX))] 3634 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when 3635 flag_wrapv is set and the original anti-range doesn't include 3636 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ 3637 if (TYPE_OVERFLOW_WRAPS (type)) 3638 { 3639 tree type_min_value = TYPE_MIN_VALUE (type); 3640 3641 min = (vr0.min != type_min_value 3642 ? int_const_binop (PLUS_EXPR, type_min_value, 3643 build_int_cst (TREE_TYPE (type_min_value), 1)) 3644 : type_min_value); 3645 } 3646 else 3647 { 3648 if (overflow_infinity_range_p (&vr0)) 3649 min = negative_overflow_infinity (type); 3650 else 3651 min = TYPE_MIN_VALUE (type); 3652 } 3653 } 3654 else 3655 { 3656 /* All else has failed, so create the range [0, INF], even for 3657 flag_wrapv since TYPE_MIN_VALUE is in the original 3658 anti-range. */ 3659 vr0.type = VR_RANGE; 3660 min = build_int_cst (type, 0); 3661 if (needs_overflow_infinity (type)) 3662 { 3663 if (supports_overflow_infinity (type)) 3664 max = positive_overflow_infinity (type); 3665 else 3666 { 3667 set_value_range_to_varying (vr); 3668 return; 3669 } 3670 } 3671 else 3672 max = TYPE_MAX_VALUE (type); 3673 } 3674 } 3675 3676 /* If the range contains zero then we know that the minimum value in the 3677 range will be zero. */ 3678 else if (range_includes_zero_p (vr0.min, vr0.max) == 1) 3679 { 3680 if (cmp == 1) 3681 max = min; 3682 min = build_int_cst (type, 0); 3683 } 3684 else 3685 { 3686 /* If the range was reversed, swap MIN and MAX. */ 3687 if (cmp == 1) 3688 { 3689 tree t = min; 3690 min = max; 3691 max = t; 3692 } 3693 } 3694 3695 cmp = compare_values (min, max); 3696 if (cmp == -2 || cmp == 1) 3697 { 3698 /* If the new range has its limits swapped around (MIN > MAX), 3699 then the operation caused one of them to wrap around, mark 3700 the new range VARYING. */ 3701 set_value_range_to_varying (vr); 3702 } 3703 else 3704 set_value_range (vr, vr0.type, min, max, NULL); 3705 return; 3706 } 3707 3708 /* For unhandled operations fall back to varying. */ 3709 set_value_range_to_varying (vr); 3710 return; 3711} 3712 3713 3714/* Extract range information from a unary expression CODE OP0 based on 3715 the range of its operand with resulting type TYPE. 3716 The resulting range is stored in *VR. */ 3717 3718static void 3719extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, 3720 tree type, tree op0) 3721{ 3722 value_range_t vr0 = VR_INITIALIZER; 3723 3724 /* Get value ranges for the operand. For constant operands, create 3725 a new value range with the operand to simplify processing. */ 3726 if (TREE_CODE (op0) == SSA_NAME) 3727 vr0 = *(get_value_range (op0)); 3728 else if (is_gimple_min_invariant (op0)) 3729 set_value_range_to_value (&vr0, op0, NULL); 3730 else 3731 set_value_range_to_varying (&vr0); 3732 3733 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); 3734} 3735 3736 3737/* Extract range information from a conditional expression STMT based on 3738 the ranges of each of its operands and the expression code. */ 3739 3740static void 3741extract_range_from_cond_expr (value_range_t *vr, gassign *stmt) 3742{ 3743 tree op0, op1; 3744 value_range_t vr0 = VR_INITIALIZER; 3745 value_range_t vr1 = VR_INITIALIZER; 3746 3747 /* Get value ranges for each operand. For constant operands, create 3748 a new value range with the operand to simplify processing. */ 3749 op0 = gimple_assign_rhs2 (stmt); 3750 if (TREE_CODE (op0) == SSA_NAME) 3751 vr0 = *(get_value_range (op0)); 3752 else if (is_gimple_min_invariant (op0)) 3753 set_value_range_to_value (&vr0, op0, NULL); 3754 else 3755 set_value_range_to_varying (&vr0); 3756 3757 op1 = gimple_assign_rhs3 (stmt); 3758 if (TREE_CODE (op1) == SSA_NAME) 3759 vr1 = *(get_value_range (op1)); 3760 else if (is_gimple_min_invariant (op1)) 3761 set_value_range_to_value (&vr1, op1, NULL); 3762 else 3763 set_value_range_to_varying (&vr1); 3764 3765 /* The resulting value range is the union of the operand ranges */ 3766 copy_value_range (vr, &vr0); 3767 vrp_meet (vr, &vr1); 3768} 3769 3770 3771/* Extract range information from a comparison expression EXPR based 3772 on the range of its operand and the expression code. */ 3773 3774static void 3775extract_range_from_comparison (value_range_t *vr, enum tree_code code, 3776 tree type, tree op0, tree op1) 3777{ 3778 bool sop = false; 3779 tree val; 3780 3781 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, 3782 NULL); 3783 3784 /* A disadvantage of using a special infinity as an overflow 3785 representation is that we lose the ability to record overflow 3786 when we don't have an infinity. So we have to ignore a result 3787 which relies on overflow. */ 3788 3789 if (val && !is_overflow_infinity (val) && !sop) 3790 { 3791 /* Since this expression was found on the RHS of an assignment, 3792 its type may be different from _Bool. Convert VAL to EXPR's 3793 type. */ 3794 val = fold_convert (type, val); 3795 if (is_gimple_min_invariant (val)) 3796 set_value_range_to_value (vr, val, vr->equiv); 3797 else 3798 set_value_range (vr, VR_RANGE, val, val, vr->equiv); 3799 } 3800 else 3801 /* The result of a comparison is always true or false. */ 3802 set_value_range_to_truthvalue (vr, type); 3803} 3804 3805/* Helper function for simplify_internal_call_using_ranges and 3806 extract_range_basic. Return true if OP0 SUBCODE OP1 for 3807 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or 3808 always overflow. Set *OVF to true if it is known to always 3809 overflow. */ 3810 3811static bool 3812check_for_binary_op_overflow (enum tree_code subcode, tree type, 3813 tree op0, tree op1, bool *ovf) 3814{ 3815 value_range_t vr0 = VR_INITIALIZER; 3816 value_range_t vr1 = VR_INITIALIZER; 3817 if (TREE_CODE (op0) == SSA_NAME) 3818 vr0 = *get_value_range (op0); 3819 else if (TREE_CODE (op0) == INTEGER_CST) 3820 set_value_range_to_value (&vr0, op0, NULL); 3821 else 3822 set_value_range_to_varying (&vr0); 3823 3824 if (TREE_CODE (op1) == SSA_NAME) 3825 vr1 = *get_value_range (op1); 3826 else if (TREE_CODE (op1) == INTEGER_CST) 3827 set_value_range_to_value (&vr1, op1, NULL); 3828 else 3829 set_value_range_to_varying (&vr1); 3830 3831 if (!range_int_cst_p (&vr0) 3832 || TREE_OVERFLOW (vr0.min) 3833 || TREE_OVERFLOW (vr0.max)) 3834 { 3835 vr0.min = vrp_val_min (TREE_TYPE (op0)); 3836 vr0.max = vrp_val_max (TREE_TYPE (op0)); 3837 } 3838 if (!range_int_cst_p (&vr1) 3839 || TREE_OVERFLOW (vr1.min) 3840 || TREE_OVERFLOW (vr1.max)) 3841 { 3842 vr1.min = vrp_val_min (TREE_TYPE (op1)); 3843 vr1.max = vrp_val_max (TREE_TYPE (op1)); 3844 } 3845 *ovf = arith_overflowed_p (subcode, type, vr0.min, 3846 subcode == MINUS_EXPR ? vr1.max : vr1.min); 3847 if (arith_overflowed_p (subcode, type, vr0.max, 3848 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf) 3849 return false; 3850 if (subcode == MULT_EXPR) 3851 { 3852 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf 3853 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf) 3854 return false; 3855 } 3856 if (*ovf) 3857 { 3858 /* So far we found that there is an overflow on the boundaries. 3859 That doesn't prove that there is an overflow even for all values 3860 in between the boundaries. For that compute widest_int range 3861 of the result and see if it doesn't overlap the range of 3862 type. */ 3863 widest_int wmin, wmax; 3864 widest_int w[4]; 3865 int i; 3866 w[0] = wi::to_widest (vr0.min); 3867 w[1] = wi::to_widest (vr0.max); 3868 w[2] = wi::to_widest (vr1.min); 3869 w[3] = wi::to_widest (vr1.max); 3870 for (i = 0; i < 4; i++) 3871 { 3872 widest_int wt; 3873 switch (subcode) 3874 { 3875 case PLUS_EXPR: 3876 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]); 3877 break; 3878 case MINUS_EXPR: 3879 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]); 3880 break; 3881 case MULT_EXPR: 3882 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]); 3883 break; 3884 default: 3885 gcc_unreachable (); 3886 } 3887 if (i == 0) 3888 { 3889 wmin = wt; 3890 wmax = wt; 3891 } 3892 else 3893 { 3894 wmin = wi::smin (wmin, wt); 3895 wmax = wi::smax (wmax, wt); 3896 } 3897 } 3898 /* The result of op0 CODE op1 is known to be in range 3899 [wmin, wmax]. */ 3900 widest_int wtmin = wi::to_widest (vrp_val_min (type)); 3901 widest_int wtmax = wi::to_widest (vrp_val_max (type)); 3902 /* If all values in [wmin, wmax] are smaller than 3903 [wtmin, wtmax] or all are larger than [wtmin, wtmax], 3904 the arithmetic operation will always overflow. */ 3905 if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax)) 3906 return true; 3907 return false; 3908 } 3909 return true; 3910} 3911 3912/* Try to derive a nonnegative or nonzero range out of STMT relying 3913 primarily on generic routines in fold in conjunction with range data. 3914 Store the result in *VR */ 3915 3916static void 3917extract_range_basic (value_range_t *vr, gimple stmt) 3918{ 3919 bool sop = false; 3920 tree type = gimple_expr_type (stmt); 3921 3922 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)) 3923 { 3924 tree fndecl = gimple_call_fndecl (stmt), arg; 3925 int mini, maxi, zerov = 0, prec; 3926 3927 switch (DECL_FUNCTION_CODE (fndecl)) 3928 { 3929 case BUILT_IN_CONSTANT_P: 3930 /* If the call is __builtin_constant_p and the argument is a 3931 function parameter resolve it to false. This avoids bogus 3932 array bound warnings. 3933 ??? We could do this as early as inlining is finished. */ 3934 arg = gimple_call_arg (stmt, 0); 3935 if (TREE_CODE (arg) == SSA_NAME 3936 && SSA_NAME_IS_DEFAULT_DEF (arg) 3937 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL) 3938 { 3939 set_value_range_to_null (vr, type); 3940 return; 3941 } 3942 break; 3943 /* Both __builtin_ffs* and __builtin_popcount return 3944 [0, prec]. */ 3945 CASE_INT_FN (BUILT_IN_FFS): 3946 CASE_INT_FN (BUILT_IN_POPCOUNT): 3947 arg = gimple_call_arg (stmt, 0); 3948 prec = TYPE_PRECISION (TREE_TYPE (arg)); 3949 mini = 0; 3950 maxi = prec; 3951 if (TREE_CODE (arg) == SSA_NAME) 3952 { 3953 value_range_t *vr0 = get_value_range (arg); 3954 /* If arg is non-zero, then ffs or popcount 3955 are non-zero. */ 3956 if (((vr0->type == VR_RANGE 3957 && range_includes_zero_p (vr0->min, vr0->max) == 0) 3958 || (vr0->type == VR_ANTI_RANGE 3959 && range_includes_zero_p (vr0->min, vr0->max) == 1)) 3960 && !is_overflow_infinity (vr0->min) 3961 && !is_overflow_infinity (vr0->max)) 3962 mini = 1; 3963 /* If some high bits are known to be zero, 3964 we can decrease the maximum. */ 3965 if (vr0->type == VR_RANGE 3966 && TREE_CODE (vr0->max) == INTEGER_CST 3967 && !operand_less_p (vr0->min, 3968 build_zero_cst (TREE_TYPE (vr0->min))) 3969 && !is_overflow_infinity (vr0->max)) 3970 maxi = tree_floor_log2 (vr0->max) + 1; 3971 } 3972 goto bitop_builtin; 3973 /* __builtin_parity* returns [0, 1]. */ 3974 CASE_INT_FN (BUILT_IN_PARITY): 3975 mini = 0; 3976 maxi = 1; 3977 goto bitop_builtin; 3978 /* __builtin_c[lt]z* return [0, prec-1], except for 3979 when the argument is 0, but that is undefined behavior. 3980 On many targets where the CLZ RTL or optab value is defined 3981 for 0 the value is prec, so include that in the range 3982 by default. */ 3983 CASE_INT_FN (BUILT_IN_CLZ): 3984 arg = gimple_call_arg (stmt, 0); 3985 prec = TYPE_PRECISION (TREE_TYPE (arg)); 3986 mini = 0; 3987 maxi = prec; 3988 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg))) 3989 != CODE_FOR_nothing 3990 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)), 3991 zerov) 3992 /* Handle only the single common value. */ 3993 && zerov != prec) 3994 /* Magic value to give up, unless vr0 proves 3995 arg is non-zero. */ 3996 mini = -2; 3997 if (TREE_CODE (arg) == SSA_NAME) 3998 { 3999 value_range_t *vr0 = get_value_range (arg); 4000 /* From clz of VR_RANGE minimum we can compute 4001 result maximum. */ 4002 if (vr0->type == VR_RANGE 4003 && TREE_CODE (vr0->min) == INTEGER_CST 4004 && !is_overflow_infinity (vr0->min)) 4005 { 4006 maxi = prec - 1 - tree_floor_log2 (vr0->min); 4007 if (maxi != prec) 4008 mini = 0; 4009 } 4010 else if (vr0->type == VR_ANTI_RANGE 4011 && integer_zerop (vr0->min) 4012 && !is_overflow_infinity (vr0->min)) 4013 { 4014 maxi = prec - 1; 4015 mini = 0; 4016 } 4017 if (mini == -2) 4018 break; 4019 /* From clz of VR_RANGE maximum we can compute 4020 result minimum. */ 4021 if (vr0->type == VR_RANGE 4022 && TREE_CODE (vr0->max) == INTEGER_CST 4023 && !is_overflow_infinity (vr0->max)) 4024 { 4025 mini = prec - 1 - tree_floor_log2 (vr0->max); 4026 if (mini == prec) 4027 break; 4028 } 4029 } 4030 if (mini == -2) 4031 break; 4032 goto bitop_builtin; 4033 /* __builtin_ctz* return [0, prec-1], except for 4034 when the argument is 0, but that is undefined behavior. 4035 If there is a ctz optab for this mode and 4036 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range, 4037 otherwise just assume 0 won't be seen. */ 4038 CASE_INT_FN (BUILT_IN_CTZ): 4039 arg = gimple_call_arg (stmt, 0); 4040 prec = TYPE_PRECISION (TREE_TYPE (arg)); 4041 mini = 0; 4042 maxi = prec - 1; 4043 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg))) 4044 != CODE_FOR_nothing 4045 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)), 4046 zerov)) 4047 { 4048 /* Handle only the two common values. */ 4049 if (zerov == -1) 4050 mini = -1; 4051 else if (zerov == prec) 4052 maxi = prec; 4053 else 4054 /* Magic value to give up, unless vr0 proves 4055 arg is non-zero. */ 4056 mini = -2; 4057 } 4058 if (TREE_CODE (arg) == SSA_NAME) 4059 { 4060 value_range_t *vr0 = get_value_range (arg); 4061 /* If arg is non-zero, then use [0, prec - 1]. */ 4062 if (((vr0->type == VR_RANGE 4063 && integer_nonzerop (vr0->min)) 4064 || (vr0->type == VR_ANTI_RANGE 4065 && integer_zerop (vr0->min))) 4066 && !is_overflow_infinity (vr0->min)) 4067 { 4068 mini = 0; 4069 maxi = prec - 1; 4070 } 4071 /* If some high bits are known to be zero, 4072 we can decrease the result maximum. */ 4073 if (vr0->type == VR_RANGE 4074 && TREE_CODE (vr0->max) == INTEGER_CST 4075 && !is_overflow_infinity (vr0->max)) 4076 { 4077 maxi = tree_floor_log2 (vr0->max); 4078 /* For vr0 [0, 0] give up. */ 4079 if (maxi == -1) 4080 break; 4081 } 4082 } 4083 if (mini == -2) 4084 break; 4085 goto bitop_builtin; 4086 /* __builtin_clrsb* returns [0, prec-1]. */ 4087 CASE_INT_FN (BUILT_IN_CLRSB): 4088 arg = gimple_call_arg (stmt, 0); 4089 prec = TYPE_PRECISION (TREE_TYPE (arg)); 4090 mini = 0; 4091 maxi = prec - 1; 4092 goto bitop_builtin; 4093 bitop_builtin: 4094 set_value_range (vr, VR_RANGE, build_int_cst (type, mini), 4095 build_int_cst (type, maxi), NULL); 4096 return; 4097 default: 4098 break; 4099 } 4100 } 4101 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)) 4102 { 4103 enum tree_code subcode = ERROR_MARK; 4104 switch (gimple_call_internal_fn (stmt)) 4105 { 4106 case IFN_UBSAN_CHECK_ADD: 4107 subcode = PLUS_EXPR; 4108 break; 4109 case IFN_UBSAN_CHECK_SUB: 4110 subcode = MINUS_EXPR; 4111 break; 4112 case IFN_UBSAN_CHECK_MUL: 4113 subcode = MULT_EXPR; 4114 break; 4115 default: 4116 break; 4117 } 4118 if (subcode != ERROR_MARK) 4119 { 4120 bool saved_flag_wrapv = flag_wrapv; 4121 /* Pretend the arithmetics is wrapping. If there is 4122 any overflow, we'll complain, but will actually do 4123 wrapping operation. */ 4124 flag_wrapv = 1; 4125 extract_range_from_binary_expr (vr, subcode, type, 4126 gimple_call_arg (stmt, 0), 4127 gimple_call_arg (stmt, 1)); 4128 flag_wrapv = saved_flag_wrapv; 4129 4130 /* If for both arguments vrp_valueize returned non-NULL, 4131 this should have been already folded and if not, it 4132 wasn't folded because of overflow. Avoid removing the 4133 UBSAN_CHECK_* calls in that case. */ 4134 if (vr->type == VR_RANGE 4135 && (vr->min == vr->max 4136 || operand_equal_p (vr->min, vr->max, 0))) 4137 set_value_range_to_varying (vr); 4138 return; 4139 } 4140 } 4141 /* Handle extraction of the two results (result of arithmetics and 4142 a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW 4143 internal function. */ 4144 else if (is_gimple_assign (stmt) 4145 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR 4146 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR) 4147 && INTEGRAL_TYPE_P (type)) 4148 { 4149 enum tree_code code = gimple_assign_rhs_code (stmt); 4150 tree op = gimple_assign_rhs1 (stmt); 4151 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME) 4152 { 4153 gimple g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0)); 4154 if (is_gimple_call (g) && gimple_call_internal_p (g)) 4155 { 4156 enum tree_code subcode = ERROR_MARK; 4157 switch (gimple_call_internal_fn (g)) 4158 { 4159 case IFN_ADD_OVERFLOW: 4160 subcode = PLUS_EXPR; 4161 break; 4162 case IFN_SUB_OVERFLOW: 4163 subcode = MINUS_EXPR; 4164 break; 4165 case IFN_MUL_OVERFLOW: 4166 subcode = MULT_EXPR; 4167 break; 4168 default: 4169 break; 4170 } 4171 if (subcode != ERROR_MARK) 4172 { 4173 tree op0 = gimple_call_arg (g, 0); 4174 tree op1 = gimple_call_arg (g, 1); 4175 if (code == IMAGPART_EXPR) 4176 { 4177 bool ovf = false; 4178 if (check_for_binary_op_overflow (subcode, type, 4179 op0, op1, &ovf)) 4180 set_value_range_to_value (vr, 4181 build_int_cst (type, ovf), 4182 NULL); 4183 else 4184 set_value_range (vr, VR_RANGE, build_int_cst (type, 0), 4185 build_int_cst (type, 1), NULL); 4186 } 4187 else if (types_compatible_p (type, TREE_TYPE (op0)) 4188 && types_compatible_p (type, TREE_TYPE (op1))) 4189 { 4190 bool saved_flag_wrapv = flag_wrapv; 4191 /* Pretend the arithmetics is wrapping. If there is 4192 any overflow, IMAGPART_EXPR will be set. */ 4193 flag_wrapv = 1; 4194 extract_range_from_binary_expr (vr, subcode, type, 4195 op0, op1); 4196 flag_wrapv = saved_flag_wrapv; 4197 } 4198 else 4199 { 4200 value_range_t vr0 = VR_INITIALIZER; 4201 value_range_t vr1 = VR_INITIALIZER; 4202 bool saved_flag_wrapv = flag_wrapv; 4203 /* Pretend the arithmetics is wrapping. If there is 4204 any overflow, IMAGPART_EXPR will be set. */ 4205 flag_wrapv = 1; 4206 extract_range_from_unary_expr (&vr0, NOP_EXPR, 4207 type, op0); 4208 extract_range_from_unary_expr (&vr1, NOP_EXPR, 4209 type, op1); 4210 extract_range_from_binary_expr_1 (vr, subcode, type, 4211 &vr0, &vr1); 4212 flag_wrapv = saved_flag_wrapv; 4213 } 4214 return; 4215 } 4216 } 4217 } 4218 } 4219 if (INTEGRAL_TYPE_P (type) 4220 && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) 4221 set_value_range_to_nonnegative (vr, type, 4222 sop || stmt_overflow_infinity (stmt)); 4223 else if (vrp_stmt_computes_nonzero (stmt, &sop) 4224 && !sop) 4225 set_value_range_to_nonnull (vr, type); 4226 else 4227 set_value_range_to_varying (vr); 4228} 4229 4230 4231/* Try to compute a useful range out of assignment STMT and store it 4232 in *VR. */ 4233 4234static void 4235extract_range_from_assignment (value_range_t *vr, gassign *stmt) 4236{ 4237 enum tree_code code = gimple_assign_rhs_code (stmt); 4238 4239 if (code == ASSERT_EXPR) 4240 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); 4241 else if (code == SSA_NAME) 4242 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); 4243 else if (TREE_CODE_CLASS (code) == tcc_binary) 4244 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), 4245 gimple_expr_type (stmt), 4246 gimple_assign_rhs1 (stmt), 4247 gimple_assign_rhs2 (stmt)); 4248 else if (TREE_CODE_CLASS (code) == tcc_unary) 4249 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), 4250 gimple_expr_type (stmt), 4251 gimple_assign_rhs1 (stmt)); 4252 else if (code == COND_EXPR) 4253 extract_range_from_cond_expr (vr, stmt); 4254 else if (TREE_CODE_CLASS (code) == tcc_comparison) 4255 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), 4256 gimple_expr_type (stmt), 4257 gimple_assign_rhs1 (stmt), 4258 gimple_assign_rhs2 (stmt)); 4259 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS 4260 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) 4261 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); 4262 else 4263 set_value_range_to_varying (vr); 4264 4265 if (vr->type == VR_VARYING) 4266 extract_range_basic (vr, stmt); 4267} 4268 4269/* Given a range VR, a LOOP and a variable VAR, determine whether it 4270 would be profitable to adjust VR using scalar evolution information 4271 for VAR. If so, update VR with the new limits. */ 4272 4273static void 4274adjust_range_with_scev (value_range_t *vr, struct loop *loop, 4275 gimple stmt, tree var) 4276{ 4277 tree init, step, chrec, tmin, tmax, min, max, type, tem; 4278 enum ev_direction dir; 4279 4280 /* TODO. Don't adjust anti-ranges. An anti-range may provide 4281 better opportunities than a regular range, but I'm not sure. */ 4282 if (vr->type == VR_ANTI_RANGE) 4283 return; 4284 4285 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var)); 4286 4287 /* Like in PR19590, scev can return a constant function. */ 4288 if (is_gimple_min_invariant (chrec)) 4289 { 4290 set_value_range_to_value (vr, chrec, vr->equiv); 4291 return; 4292 } 4293 4294 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) 4295 return; 4296 4297 init = initial_condition_in_loop_num (chrec, loop->num); 4298 tem = op_with_constant_singleton_value_range (init); 4299 if (tem) 4300 init = tem; 4301 step = evolution_part_in_loop_num (chrec, loop->num); 4302 tem = op_with_constant_singleton_value_range (step); 4303 if (tem) 4304 step = tem; 4305 4306 /* If STEP is symbolic, we can't know whether INIT will be the 4307 minimum or maximum value in the range. Also, unless INIT is 4308 a simple expression, compare_values and possibly other functions 4309 in tree-vrp won't be able to handle it. */ 4310 if (step == NULL_TREE 4311 || !is_gimple_min_invariant (step) 4312 || !valid_value_p (init)) 4313 return; 4314 4315 dir = scev_direction (chrec); 4316 if (/* Do not adjust ranges if we do not know whether the iv increases 4317 or decreases, ... */ 4318 dir == EV_DIR_UNKNOWN 4319 /* ... or if it may wrap. */ 4320 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), 4321 true)) 4322 return; 4323 4324 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of 4325 negative_overflow_infinity and positive_overflow_infinity, 4326 because we have concluded that the loop probably does not 4327 wrap. */ 4328 4329 type = TREE_TYPE (var); 4330 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type)) 4331 tmin = lower_bound_in_type (type, type); 4332 else 4333 tmin = TYPE_MIN_VALUE (type); 4334 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type)) 4335 tmax = upper_bound_in_type (type, type); 4336 else 4337 tmax = TYPE_MAX_VALUE (type); 4338 4339 /* Try to use estimated number of iterations for the loop to constrain the 4340 final value in the evolution. */ 4341 if (TREE_CODE (step) == INTEGER_CST 4342 && is_gimple_val (init) 4343 && (TREE_CODE (init) != SSA_NAME 4344 || get_value_range (init)->type == VR_RANGE)) 4345 { 4346 widest_int nit; 4347 4348 /* We are only entering here for loop header PHI nodes, so using 4349 the number of latch executions is the correct thing to use. */ 4350 if (max_loop_iterations (loop, &nit)) 4351 { 4352 value_range_t maxvr = VR_INITIALIZER; 4353 signop sgn = TYPE_SIGN (TREE_TYPE (step)); 4354 bool overflow; 4355 4356 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn, 4357 &overflow); 4358 /* If the multiplication overflowed we can't do a meaningful 4359 adjustment. Likewise if the result doesn't fit in the type 4360 of the induction variable. For a signed type we have to 4361 check whether the result has the expected signedness which 4362 is that of the step as number of iterations is unsigned. */ 4363 if (!overflow 4364 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init)) 4365 && (sgn == UNSIGNED 4366 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0))) 4367 { 4368 tem = wide_int_to_tree (TREE_TYPE (init), wtmp); 4369 extract_range_from_binary_expr (&maxvr, PLUS_EXPR, 4370 TREE_TYPE (init), init, tem); 4371 /* Likewise if the addition did. */ 4372 if (maxvr.type == VR_RANGE) 4373 { 4374 tmin = maxvr.min; 4375 tmax = maxvr.max; 4376 } 4377 } 4378 } 4379 } 4380 4381 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 4382 { 4383 min = tmin; 4384 max = tmax; 4385 4386 /* For VARYING or UNDEFINED ranges, just about anything we get 4387 from scalar evolutions should be better. */ 4388 4389 if (dir == EV_DIR_DECREASES) 4390 max = init; 4391 else 4392 min = init; 4393 } 4394 else if (vr->type == VR_RANGE) 4395 { 4396 min = vr->min; 4397 max = vr->max; 4398 4399 if (dir == EV_DIR_DECREASES) 4400 { 4401 /* INIT is the maximum value. If INIT is lower than VR->MAX 4402 but no smaller than VR->MIN, set VR->MAX to INIT. */ 4403 if (compare_values (init, max) == -1) 4404 max = init; 4405 4406 /* According to the loop information, the variable does not 4407 overflow. If we think it does, probably because of an 4408 overflow due to arithmetic on a different INF value, 4409 reset now. */ 4410 if (is_negative_overflow_infinity (min) 4411 || compare_values (min, tmin) == -1) 4412 min = tmin; 4413 4414 } 4415 else 4416 { 4417 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */ 4418 if (compare_values (init, min) == 1) 4419 min = init; 4420 4421 if (is_positive_overflow_infinity (max) 4422 || compare_values (tmax, max) == -1) 4423 max = tmax; 4424 } 4425 } 4426 else 4427 return; 4428 4429 /* If we just created an invalid range with the minimum 4430 greater than the maximum, we fail conservatively. 4431 This should happen only in unreachable 4432 parts of code, or for invalid programs. */ 4433 if (compare_values (min, max) == 1 4434 || (is_negative_overflow_infinity (min) 4435 && is_positive_overflow_infinity (max))) 4436 return; 4437 4438 set_value_range (vr, VR_RANGE, min, max, vr->equiv); 4439} 4440 4441 4442/* Given two numeric value ranges VR0, VR1 and a comparison code COMP: 4443 4444 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for 4445 all the values in the ranges. 4446 4447 - Return BOOLEAN_FALSE_NODE if the comparison always returns false. 4448 4449 - Return NULL_TREE if it is not always possible to determine the 4450 value of the comparison. 4451 4452 Also set *STRICT_OVERFLOW_P to indicate whether a range with an 4453 overflow infinity was used in the test. */ 4454 4455 4456static tree 4457compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1, 4458 bool *strict_overflow_p) 4459{ 4460 /* VARYING or UNDEFINED ranges cannot be compared. */ 4461 if (vr0->type == VR_VARYING 4462 || vr0->type == VR_UNDEFINED 4463 || vr1->type == VR_VARYING 4464 || vr1->type == VR_UNDEFINED) 4465 return NULL_TREE; 4466 4467 /* Anti-ranges need to be handled separately. */ 4468 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) 4469 { 4470 /* If both are anti-ranges, then we cannot compute any 4471 comparison. */ 4472 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) 4473 return NULL_TREE; 4474 4475 /* These comparisons are never statically computable. */ 4476 if (comp == GT_EXPR 4477 || comp == GE_EXPR 4478 || comp == LT_EXPR 4479 || comp == LE_EXPR) 4480 return NULL_TREE; 4481 4482 /* Equality can be computed only between a range and an 4483 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */ 4484 if (vr0->type == VR_RANGE) 4485 { 4486 /* To simplify processing, make VR0 the anti-range. */ 4487 value_range_t *tmp = vr0; 4488 vr0 = vr1; 4489 vr1 = tmp; 4490 } 4491 4492 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR); 4493 4494 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0 4495 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0) 4496 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 4497 4498 return NULL_TREE; 4499 } 4500 4501 if (!usable_range_p (vr0, strict_overflow_p) 4502 || !usable_range_p (vr1, strict_overflow_p)) 4503 return NULL_TREE; 4504 4505 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the 4506 operands around and change the comparison code. */ 4507 if (comp == GT_EXPR || comp == GE_EXPR) 4508 { 4509 value_range_t *tmp; 4510 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR; 4511 tmp = vr0; 4512 vr0 = vr1; 4513 vr1 = tmp; 4514 } 4515 4516 if (comp == EQ_EXPR) 4517 { 4518 /* Equality may only be computed if both ranges represent 4519 exactly one value. */ 4520 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0 4521 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0) 4522 { 4523 int cmp_min = compare_values_warnv (vr0->min, vr1->min, 4524 strict_overflow_p); 4525 int cmp_max = compare_values_warnv (vr0->max, vr1->max, 4526 strict_overflow_p); 4527 if (cmp_min == 0 && cmp_max == 0) 4528 return boolean_true_node; 4529 else if (cmp_min != -2 && cmp_max != -2) 4530 return boolean_false_node; 4531 } 4532 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */ 4533 else if (compare_values_warnv (vr0->min, vr1->max, 4534 strict_overflow_p) == 1 4535 || compare_values_warnv (vr1->min, vr0->max, 4536 strict_overflow_p) == 1) 4537 return boolean_false_node; 4538 4539 return NULL_TREE; 4540 } 4541 else if (comp == NE_EXPR) 4542 { 4543 int cmp1, cmp2; 4544 4545 /* If VR0 is completely to the left or completely to the right 4546 of VR1, they are always different. Notice that we need to 4547 make sure that both comparisons yield similar results to 4548 avoid comparing values that cannot be compared at 4549 compile-time. */ 4550 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 4551 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 4552 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1)) 4553 return boolean_true_node; 4554 4555 /* If VR0 and VR1 represent a single value and are identical, 4556 return false. */ 4557 else if (compare_values_warnv (vr0->min, vr0->max, 4558 strict_overflow_p) == 0 4559 && compare_values_warnv (vr1->min, vr1->max, 4560 strict_overflow_p) == 0 4561 && compare_values_warnv (vr0->min, vr1->min, 4562 strict_overflow_p) == 0 4563 && compare_values_warnv (vr0->max, vr1->max, 4564 strict_overflow_p) == 0) 4565 return boolean_false_node; 4566 4567 /* Otherwise, they may or may not be different. */ 4568 else 4569 return NULL_TREE; 4570 } 4571 else if (comp == LT_EXPR || comp == LE_EXPR) 4572 { 4573 int tst; 4574 4575 /* If VR0 is to the left of VR1, return true. */ 4576 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 4577 if ((comp == LT_EXPR && tst == -1) 4578 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 4579 { 4580 if (overflow_infinity_range_p (vr0) 4581 || overflow_infinity_range_p (vr1)) 4582 *strict_overflow_p = true; 4583 return boolean_true_node; 4584 } 4585 4586 /* If VR0 is to the right of VR1, return false. */ 4587 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 4588 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 4589 || (comp == LE_EXPR && tst == 1)) 4590 { 4591 if (overflow_infinity_range_p (vr0) 4592 || overflow_infinity_range_p (vr1)) 4593 *strict_overflow_p = true; 4594 return boolean_false_node; 4595 } 4596 4597 /* Otherwise, we don't know. */ 4598 return NULL_TREE; 4599 } 4600 4601 gcc_unreachable (); 4602} 4603 4604 4605/* Given a value range VR, a value VAL and a comparison code COMP, return 4606 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the 4607 values in VR. Return BOOLEAN_FALSE_NODE if the comparison 4608 always returns false. Return NULL_TREE if it is not always 4609 possible to determine the value of the comparison. Also set 4610 *STRICT_OVERFLOW_P to indicate whether a range with an overflow 4611 infinity was used in the test. */ 4612 4613static tree 4614compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val, 4615 bool *strict_overflow_p) 4616{ 4617 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 4618 return NULL_TREE; 4619 4620 /* Anti-ranges need to be handled separately. */ 4621 if (vr->type == VR_ANTI_RANGE) 4622 { 4623 /* For anti-ranges, the only predicates that we can compute at 4624 compile time are equality and inequality. */ 4625 if (comp == GT_EXPR 4626 || comp == GE_EXPR 4627 || comp == LT_EXPR 4628 || comp == LE_EXPR) 4629 return NULL_TREE; 4630 4631 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ 4632 if (value_inside_range (val, vr->min, vr->max) == 1) 4633 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 4634 4635 return NULL_TREE; 4636 } 4637 4638 if (!usable_range_p (vr, strict_overflow_p)) 4639 return NULL_TREE; 4640 4641 if (comp == EQ_EXPR) 4642 { 4643 /* EQ_EXPR may only be computed if VR represents exactly 4644 one value. */ 4645 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) 4646 { 4647 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); 4648 if (cmp == 0) 4649 return boolean_true_node; 4650 else if (cmp == -1 || cmp == 1 || cmp == 2) 4651 return boolean_false_node; 4652 } 4653 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 4654 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) 4655 return boolean_false_node; 4656 4657 return NULL_TREE; 4658 } 4659 else if (comp == NE_EXPR) 4660 { 4661 /* If VAL is not inside VR, then they are always different. */ 4662 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 4663 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) 4664 return boolean_true_node; 4665 4666 /* If VR represents exactly one value equal to VAL, then return 4667 false. */ 4668 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 4669 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) 4670 return boolean_false_node; 4671 4672 /* Otherwise, they may or may not be different. */ 4673 return NULL_TREE; 4674 } 4675 else if (comp == LT_EXPR || comp == LE_EXPR) 4676 { 4677 int tst; 4678 4679 /* If VR is to the left of VAL, return true. */ 4680 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 4681 if ((comp == LT_EXPR && tst == -1) 4682 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 4683 { 4684 if (overflow_infinity_range_p (vr)) 4685 *strict_overflow_p = true; 4686 return boolean_true_node; 4687 } 4688 4689 /* If VR is to the right of VAL, return false. */ 4690 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 4691 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 4692 || (comp == LE_EXPR && tst == 1)) 4693 { 4694 if (overflow_infinity_range_p (vr)) 4695 *strict_overflow_p = true; 4696 return boolean_false_node; 4697 } 4698 4699 /* Otherwise, we don't know. */ 4700 return NULL_TREE; 4701 } 4702 else if (comp == GT_EXPR || comp == GE_EXPR) 4703 { 4704 int tst; 4705 4706 /* If VR is to the right of VAL, return true. */ 4707 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 4708 if ((comp == GT_EXPR && tst == 1) 4709 || (comp == GE_EXPR && (tst == 0 || tst == 1))) 4710 { 4711 if (overflow_infinity_range_p (vr)) 4712 *strict_overflow_p = true; 4713 return boolean_true_node; 4714 } 4715 4716 /* If VR is to the left of VAL, return false. */ 4717 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 4718 if ((comp == GT_EXPR && (tst == -1 || tst == 0)) 4719 || (comp == GE_EXPR && tst == -1)) 4720 { 4721 if (overflow_infinity_range_p (vr)) 4722 *strict_overflow_p = true; 4723 return boolean_false_node; 4724 } 4725 4726 /* Otherwise, we don't know. */ 4727 return NULL_TREE; 4728 } 4729 4730 gcc_unreachable (); 4731} 4732 4733 4734/* Debugging dumps. */ 4735 4736void dump_value_range (FILE *, value_range_t *); 4737void debug_value_range (value_range_t *); 4738void dump_all_value_ranges (FILE *); 4739void debug_all_value_ranges (void); 4740void dump_vr_equiv (FILE *, bitmap); 4741void debug_vr_equiv (bitmap); 4742 4743 4744/* Dump value range VR to FILE. */ 4745 4746void 4747dump_value_range (FILE *file, value_range_t *vr) 4748{ 4749 if (vr == NULL) 4750 fprintf (file, "[]"); 4751 else if (vr->type == VR_UNDEFINED) 4752 fprintf (file, "UNDEFINED"); 4753 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 4754 { 4755 tree type = TREE_TYPE (vr->min); 4756 4757 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); 4758 4759 if (is_negative_overflow_infinity (vr->min)) 4760 fprintf (file, "-INF(OVF)"); 4761 else if (INTEGRAL_TYPE_P (type) 4762 && !TYPE_UNSIGNED (type) 4763 && vrp_val_is_min (vr->min)) 4764 fprintf (file, "-INF"); 4765 else 4766 print_generic_expr (file, vr->min, 0); 4767 4768 fprintf (file, ", "); 4769 4770 if (is_positive_overflow_infinity (vr->max)) 4771 fprintf (file, "+INF(OVF)"); 4772 else if (INTEGRAL_TYPE_P (type) 4773 && vrp_val_is_max (vr->max)) 4774 fprintf (file, "+INF"); 4775 else 4776 print_generic_expr (file, vr->max, 0); 4777 4778 fprintf (file, "]"); 4779 4780 if (vr->equiv) 4781 { 4782 bitmap_iterator bi; 4783 unsigned i, c = 0; 4784 4785 fprintf (file, " EQUIVALENCES: { "); 4786 4787 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) 4788 { 4789 print_generic_expr (file, ssa_name (i), 0); 4790 fprintf (file, " "); 4791 c++; 4792 } 4793 4794 fprintf (file, "} (%u elements)", c); 4795 } 4796 } 4797 else if (vr->type == VR_VARYING) 4798 fprintf (file, "VARYING"); 4799 else 4800 fprintf (file, "INVALID RANGE"); 4801} 4802 4803 4804/* Dump value range VR to stderr. */ 4805 4806DEBUG_FUNCTION void 4807debug_value_range (value_range_t *vr) 4808{ 4809 dump_value_range (stderr, vr); 4810 fprintf (stderr, "\n"); 4811} 4812 4813 4814/* Dump value ranges of all SSA_NAMEs to FILE. */ 4815 4816void 4817dump_all_value_ranges (FILE *file) 4818{ 4819 size_t i; 4820 4821 for (i = 0; i < num_vr_values; i++) 4822 { 4823 if (vr_value[i]) 4824 { 4825 print_generic_expr (file, ssa_name (i), 0); 4826 fprintf (file, ": "); 4827 dump_value_range (file, vr_value[i]); 4828 fprintf (file, "\n"); 4829 } 4830 } 4831 4832 fprintf (file, "\n"); 4833} 4834 4835 4836/* Dump all value ranges to stderr. */ 4837 4838DEBUG_FUNCTION void 4839debug_all_value_ranges (void) 4840{ 4841 dump_all_value_ranges (stderr); 4842} 4843 4844 4845/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, 4846 create a new SSA name N and return the assertion assignment 4847 'N = ASSERT_EXPR <V, V OP W>'. */ 4848 4849static gimple 4850build_assert_expr_for (tree cond, tree v) 4851{ 4852 tree a; 4853 gassign *assertion; 4854 4855 gcc_assert (TREE_CODE (v) == SSA_NAME 4856 && COMPARISON_CLASS_P (cond)); 4857 4858 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); 4859 assertion = gimple_build_assign (NULL_TREE, a); 4860 4861 /* The new ASSERT_EXPR, creates a new SSA name that replaces the 4862 operand of the ASSERT_EXPR. Create it so the new name and the old one 4863 are registered in the replacement table so that we can fix the SSA web 4864 after adding all the ASSERT_EXPRs. */ 4865 create_new_def_for (v, assertion, NULL); 4866 4867 return assertion; 4868} 4869 4870 4871/* Return false if EXPR is a predicate expression involving floating 4872 point values. */ 4873 4874static inline bool 4875fp_predicate (gimple stmt) 4876{ 4877 GIMPLE_CHECK (stmt, GIMPLE_COND); 4878 4879 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); 4880} 4881 4882/* If the range of values taken by OP can be inferred after STMT executes, 4883 return the comparison code (COMP_CODE_P) and value (VAL_P) that 4884 describes the inferred range. Return true if a range could be 4885 inferred. */ 4886 4887static bool 4888infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p) 4889{ 4890 *val_p = NULL_TREE; 4891 *comp_code_p = ERROR_MARK; 4892 4893 /* Do not attempt to infer anything in names that flow through 4894 abnormal edges. */ 4895 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) 4896 return false; 4897 4898 /* Similarly, don't infer anything from statements that may throw 4899 exceptions. ??? Relax this requirement? */ 4900 if (stmt_could_throw_p (stmt)) 4901 return false; 4902 4903 /* If STMT is the last statement of a basic block with no normal 4904 successors, there is no point inferring anything about any of its 4905 operands. We would not be able to find a proper insertion point 4906 for the assertion, anyway. */ 4907 if (stmt_ends_bb_p (stmt)) 4908 { 4909 edge_iterator ei; 4910 edge e; 4911 4912 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) 4913 if (!(e->flags & EDGE_ABNORMAL)) 4914 break; 4915 if (e == NULL) 4916 return false; 4917 } 4918 4919 if (infer_nonnull_range (stmt, op, true, true)) 4920 { 4921 *val_p = build_int_cst (TREE_TYPE (op), 0); 4922 *comp_code_p = NE_EXPR; 4923 return true; 4924 } 4925 4926 return false; 4927} 4928 4929 4930void dump_asserts_for (FILE *, tree); 4931void debug_asserts_for (tree); 4932void dump_all_asserts (FILE *); 4933void debug_all_asserts (void); 4934 4935/* Dump all the registered assertions for NAME to FILE. */ 4936 4937void 4938dump_asserts_for (FILE *file, tree name) 4939{ 4940 assert_locus_t loc; 4941 4942 fprintf (file, "Assertions to be inserted for "); 4943 print_generic_expr (file, name, 0); 4944 fprintf (file, "\n"); 4945 4946 loc = asserts_for[SSA_NAME_VERSION (name)]; 4947 while (loc) 4948 { 4949 fprintf (file, "\t"); 4950 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); 4951 fprintf (file, "\n\tBB #%d", loc->bb->index); 4952 if (loc->e) 4953 { 4954 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, 4955 loc->e->dest->index); 4956 dump_edge_info (file, loc->e, dump_flags, 0); 4957 } 4958 fprintf (file, "\n\tPREDICATE: "); 4959 print_generic_expr (file, name, 0); 4960 fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); 4961 print_generic_expr (file, loc->val, 0); 4962 fprintf (file, "\n\n"); 4963 loc = loc->next; 4964 } 4965 4966 fprintf (file, "\n"); 4967} 4968 4969 4970/* Dump all the registered assertions for NAME to stderr. */ 4971 4972DEBUG_FUNCTION void 4973debug_asserts_for (tree name) 4974{ 4975 dump_asserts_for (stderr, name); 4976} 4977 4978 4979/* Dump all the registered assertions for all the names to FILE. */ 4980 4981void 4982dump_all_asserts (FILE *file) 4983{ 4984 unsigned i; 4985 bitmap_iterator bi; 4986 4987 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); 4988 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 4989 dump_asserts_for (file, ssa_name (i)); 4990 fprintf (file, "\n"); 4991} 4992 4993 4994/* Dump all the registered assertions for all the names to stderr. */ 4995 4996DEBUG_FUNCTION void 4997debug_all_asserts (void) 4998{ 4999 dump_all_asserts (stderr); 5000} 5001 5002 5003/* If NAME doesn't have an ASSERT_EXPR registered for asserting 5004 'EXPR COMP_CODE VAL' at a location that dominates block BB or 5005 E->DEST, then register this location as a possible insertion point 5006 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. 5007 5008 BB, E and SI provide the exact insertion point for the new 5009 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted 5010 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on 5011 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E 5012 must not be NULL. */ 5013 5014static void 5015register_new_assert_for (tree name, tree expr, 5016 enum tree_code comp_code, 5017 tree val, 5018 basic_block bb, 5019 edge e, 5020 gimple_stmt_iterator si) 5021{ 5022 assert_locus_t n, loc, last_loc; 5023 basic_block dest_bb; 5024 5025 gcc_checking_assert (bb == NULL || e == NULL); 5026 5027 if (e == NULL) 5028 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND 5029 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); 5030 5031 /* Never build an assert comparing against an integer constant with 5032 TREE_OVERFLOW set. This confuses our undefined overflow warning 5033 machinery. */ 5034 if (TREE_OVERFLOW_P (val)) 5035 val = drop_tree_overflow (val); 5036 5037 /* The new assertion A will be inserted at BB or E. We need to 5038 determine if the new location is dominated by a previously 5039 registered location for A. If we are doing an edge insertion, 5040 assume that A will be inserted at E->DEST. Note that this is not 5041 necessarily true. 5042 5043 If E is a critical edge, it will be split. But even if E is 5044 split, the new block will dominate the same set of blocks that 5045 E->DEST dominates. 5046 5047 The reverse, however, is not true, blocks dominated by E->DEST 5048 will not be dominated by the new block created to split E. So, 5049 if the insertion location is on a critical edge, we will not use 5050 the new location to move another assertion previously registered 5051 at a block dominated by E->DEST. */ 5052 dest_bb = (bb) ? bb : e->dest; 5053 5054 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and 5055 VAL at a block dominating DEST_BB, then we don't need to insert a new 5056 one. Similarly, if the same assertion already exists at a block 5057 dominated by DEST_BB and the new location is not on a critical 5058 edge, then update the existing location for the assertion (i.e., 5059 move the assertion up in the dominance tree). 5060 5061 Note, this is implemented as a simple linked list because there 5062 should not be more than a handful of assertions registered per 5063 name. If this becomes a performance problem, a table hashed by 5064 COMP_CODE and VAL could be implemented. */ 5065 loc = asserts_for[SSA_NAME_VERSION (name)]; 5066 last_loc = loc; 5067 while (loc) 5068 { 5069 if (loc->comp_code == comp_code 5070 && (loc->val == val 5071 || operand_equal_p (loc->val, val, 0)) 5072 && (loc->expr == expr 5073 || operand_equal_p (loc->expr, expr, 0))) 5074 { 5075 /* If E is not a critical edge and DEST_BB 5076 dominates the existing location for the assertion, move 5077 the assertion up in the dominance tree by updating its 5078 location information. */ 5079 if ((e == NULL || !EDGE_CRITICAL_P (e)) 5080 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) 5081 { 5082 loc->bb = dest_bb; 5083 loc->e = e; 5084 loc->si = si; 5085 return; 5086 } 5087 } 5088 5089 /* Update the last node of the list and move to the next one. */ 5090 last_loc = loc; 5091 loc = loc->next; 5092 } 5093 5094 /* If we didn't find an assertion already registered for 5095 NAME COMP_CODE VAL, add a new one at the end of the list of 5096 assertions associated with NAME. */ 5097 n = XNEW (struct assert_locus_d); 5098 n->bb = dest_bb; 5099 n->e = e; 5100 n->si = si; 5101 n->comp_code = comp_code; 5102 n->val = val; 5103 n->expr = expr; 5104 n->next = NULL; 5105 5106 if (last_loc) 5107 last_loc->next = n; 5108 else 5109 asserts_for[SSA_NAME_VERSION (name)] = n; 5110 5111 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); 5112} 5113 5114/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. 5115 Extract a suitable test code and value and store them into *CODE_P and 5116 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. 5117 5118 If no extraction was possible, return FALSE, otherwise return TRUE. 5119 5120 If INVERT is true, then we invert the result stored into *CODE_P. */ 5121 5122static bool 5123extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, 5124 tree cond_op0, tree cond_op1, 5125 bool invert, enum tree_code *code_p, 5126 tree *val_p) 5127{ 5128 enum tree_code comp_code; 5129 tree val; 5130 5131 /* Otherwise, we have a comparison of the form NAME COMP VAL 5132 or VAL COMP NAME. */ 5133 if (name == cond_op1) 5134 { 5135 /* If the predicate is of the form VAL COMP NAME, flip 5136 COMP around because we need to register NAME as the 5137 first operand in the predicate. */ 5138 comp_code = swap_tree_comparison (cond_code); 5139 val = cond_op0; 5140 } 5141 else 5142 { 5143 /* The comparison is of the form NAME COMP VAL, so the 5144 comparison code remains unchanged. */ 5145 comp_code = cond_code; 5146 val = cond_op1; 5147 } 5148 5149 /* Invert the comparison code as necessary. */ 5150 if (invert) 5151 comp_code = invert_tree_comparison (comp_code, 0); 5152 5153 /* VRP does not handle float types. */ 5154 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))) 5155 return false; 5156 5157 /* Do not register always-false predicates. 5158 FIXME: this works around a limitation in fold() when dealing with 5159 enumerations. Given 'enum { N1, N2 } x;', fold will not 5160 fold 'if (x > N2)' to 'if (0)'. */ 5161 if ((comp_code == GT_EXPR || comp_code == LT_EXPR) 5162 && INTEGRAL_TYPE_P (TREE_TYPE (val))) 5163 { 5164 tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); 5165 tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); 5166 5167 if (comp_code == GT_EXPR 5168 && (!max 5169 || compare_values (val, max) == 0)) 5170 return false; 5171 5172 if (comp_code == LT_EXPR 5173 && (!min 5174 || compare_values (val, min) == 0)) 5175 return false; 5176 } 5177 *code_p = comp_code; 5178 *val_p = val; 5179 return true; 5180} 5181 5182/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any 5183 (otherwise return VAL). VAL and MASK must be zero-extended for 5184 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT 5185 (to transform signed values into unsigned) and at the end xor 5186 SGNBIT back. */ 5187 5188static wide_int 5189masked_increment (const wide_int &val_in, const wide_int &mask, 5190 const wide_int &sgnbit, unsigned int prec) 5191{ 5192 wide_int bit = wi::one (prec), res; 5193 unsigned int i; 5194 5195 wide_int val = val_in ^ sgnbit; 5196 for (i = 0; i < prec; i++, bit += bit) 5197 { 5198 res = mask; 5199 if ((res & bit) == 0) 5200 continue; 5201 res = bit - 1; 5202 res = (val + bit).and_not (res); 5203 res &= mask; 5204 if (wi::gtu_p (res, val)) 5205 return res ^ sgnbit; 5206 } 5207 return val ^ sgnbit; 5208} 5209 5210/* Try to register an edge assertion for SSA name NAME on edge E for 5211 the condition COND contributing to the conditional jump pointed to by BSI. 5212 Invert the condition COND if INVERT is true. */ 5213 5214static void 5215register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, 5216 enum tree_code cond_code, 5217 tree cond_op0, tree cond_op1, bool invert) 5218{ 5219 tree val; 5220 enum tree_code comp_code; 5221 5222 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 5223 cond_op0, 5224 cond_op1, 5225 invert, &comp_code, &val)) 5226 return; 5227 5228 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph 5229 reachable from E. */ 5230 if (live_on_edge (e, name) 5231 && !has_single_use (name)) 5232 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); 5233 5234 /* In the case of NAME <= CST and NAME being defined as 5235 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 5236 and NAME2 <= CST - CST2. We can do the same for NAME > CST. 5237 This catches range and anti-range tests. */ 5238 if ((comp_code == LE_EXPR 5239 || comp_code == GT_EXPR) 5240 && TREE_CODE (val) == INTEGER_CST 5241 && TYPE_UNSIGNED (TREE_TYPE (val))) 5242 { 5243 gimple def_stmt = SSA_NAME_DEF_STMT (name); 5244 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; 5245 5246 /* Extract CST2 from the (optional) addition. */ 5247 if (is_gimple_assign (def_stmt) 5248 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) 5249 { 5250 name2 = gimple_assign_rhs1 (def_stmt); 5251 cst2 = gimple_assign_rhs2 (def_stmt); 5252 if (TREE_CODE (name2) == SSA_NAME 5253 && TREE_CODE (cst2) == INTEGER_CST) 5254 def_stmt = SSA_NAME_DEF_STMT (name2); 5255 } 5256 5257 /* Extract NAME2 from the (optional) sign-changing cast. */ 5258 if (gimple_assign_cast_p (def_stmt)) 5259 { 5260 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) 5261 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) 5262 && (TYPE_PRECISION (gimple_expr_type (def_stmt)) 5263 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) 5264 name3 = gimple_assign_rhs1 (def_stmt); 5265 } 5266 5267 /* If name3 is used later, create an ASSERT_EXPR for it. */ 5268 if (name3 != NULL_TREE 5269 && TREE_CODE (name3) == SSA_NAME 5270 && (cst2 == NULL_TREE 5271 || TREE_CODE (cst2) == INTEGER_CST) 5272 && INTEGRAL_TYPE_P (TREE_TYPE (name3)) 5273 && live_on_edge (e, name3) 5274 && !has_single_use (name3)) 5275 { 5276 tree tmp; 5277 5278 /* Build an expression for the range test. */ 5279 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); 5280 if (cst2 != NULL_TREE) 5281 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 5282 5283 if (dump_file) 5284 { 5285 fprintf (dump_file, "Adding assert for "); 5286 print_generic_expr (dump_file, name3, 0); 5287 fprintf (dump_file, " from "); 5288 print_generic_expr (dump_file, tmp, 0); 5289 fprintf (dump_file, "\n"); 5290 } 5291 5292 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); 5293 } 5294 5295 /* If name2 is used later, create an ASSERT_EXPR for it. */ 5296 if (name2 != NULL_TREE 5297 && TREE_CODE (name2) == SSA_NAME 5298 && TREE_CODE (cst2) == INTEGER_CST 5299 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 5300 && live_on_edge (e, name2) 5301 && !has_single_use (name2)) 5302 { 5303 tree tmp; 5304 5305 /* Build an expression for the range test. */ 5306 tmp = name2; 5307 if (TREE_TYPE (name) != TREE_TYPE (name2)) 5308 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); 5309 if (cst2 != NULL_TREE) 5310 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 5311 5312 if (dump_file) 5313 { 5314 fprintf (dump_file, "Adding assert for "); 5315 print_generic_expr (dump_file, name2, 0); 5316 fprintf (dump_file, " from "); 5317 print_generic_expr (dump_file, tmp, 0); 5318 fprintf (dump_file, "\n"); 5319 } 5320 5321 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); 5322 } 5323 } 5324 5325 /* In the case of post-in/decrement tests like if (i++) ... and uses 5326 of the in/decremented value on the edge the extra name we want to 5327 assert for is not on the def chain of the name compared. Instead 5328 it is in the set of use stmts. */ 5329 if ((comp_code == NE_EXPR 5330 || comp_code == EQ_EXPR) 5331 && TREE_CODE (val) == INTEGER_CST) 5332 { 5333 imm_use_iterator ui; 5334 gimple use_stmt; 5335 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name) 5336 { 5337 /* Cut off to use-stmts that are in the predecessor. */ 5338 if (gimple_bb (use_stmt) != e->src) 5339 continue; 5340 5341 if (!is_gimple_assign (use_stmt)) 5342 continue; 5343 5344 enum tree_code code = gimple_assign_rhs_code (use_stmt); 5345 if (code != PLUS_EXPR 5346 && code != MINUS_EXPR) 5347 continue; 5348 5349 tree cst = gimple_assign_rhs2 (use_stmt); 5350 if (TREE_CODE (cst) != INTEGER_CST) 5351 continue; 5352 5353 tree name2 = gimple_assign_lhs (use_stmt); 5354 if (live_on_edge (e, name2)) 5355 { 5356 cst = int_const_binop (code, val, cst); 5357 register_new_assert_for (name2, name2, comp_code, cst, 5358 NULL, e, bsi); 5359 } 5360 } 5361 } 5362 5363 if (TREE_CODE_CLASS (comp_code) == tcc_comparison 5364 && TREE_CODE (val) == INTEGER_CST) 5365 { 5366 gimple def_stmt = SSA_NAME_DEF_STMT (name); 5367 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; 5368 tree val2 = NULL_TREE; 5369 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); 5370 wide_int mask = wi::zero (prec); 5371 unsigned int nprec = prec; 5372 enum tree_code rhs_code = ERROR_MARK; 5373 5374 if (is_gimple_assign (def_stmt)) 5375 rhs_code = gimple_assign_rhs_code (def_stmt); 5376 5377 /* Add asserts for NAME cmp CST and NAME being defined 5378 as NAME = (int) NAME2. */ 5379 if (!TYPE_UNSIGNED (TREE_TYPE (val)) 5380 && (comp_code == LE_EXPR || comp_code == LT_EXPR 5381 || comp_code == GT_EXPR || comp_code == GE_EXPR) 5382 && gimple_assign_cast_p (def_stmt)) 5383 { 5384 name2 = gimple_assign_rhs1 (def_stmt); 5385 if (CONVERT_EXPR_CODE_P (rhs_code) 5386 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 5387 && TYPE_UNSIGNED (TREE_TYPE (name2)) 5388 && prec == TYPE_PRECISION (TREE_TYPE (name2)) 5389 && (comp_code == LE_EXPR || comp_code == GT_EXPR 5390 || !tree_int_cst_equal (val, 5391 TYPE_MIN_VALUE (TREE_TYPE (val)))) 5392 && live_on_edge (e, name2) 5393 && !has_single_use (name2)) 5394 { 5395 tree tmp, cst; 5396 enum tree_code new_comp_code = comp_code; 5397 5398 cst = fold_convert (TREE_TYPE (name2), 5399 TYPE_MIN_VALUE (TREE_TYPE (val))); 5400 /* Build an expression for the range test. */ 5401 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst); 5402 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst, 5403 fold_convert (TREE_TYPE (name2), val)); 5404 if (comp_code == LT_EXPR || comp_code == GE_EXPR) 5405 { 5406 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; 5407 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, 5408 build_int_cst (TREE_TYPE (name2), 1)); 5409 } 5410 5411 if (dump_file) 5412 { 5413 fprintf (dump_file, "Adding assert for "); 5414 print_generic_expr (dump_file, name2, 0); 5415 fprintf (dump_file, " from "); 5416 print_generic_expr (dump_file, tmp, 0); 5417 fprintf (dump_file, "\n"); 5418 } 5419 5420 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL, 5421 e, bsi); 5422 } 5423 } 5424 5425 /* Add asserts for NAME cmp CST and NAME being defined as 5426 NAME = NAME2 >> CST2. 5427 5428 Extract CST2 from the right shift. */ 5429 if (rhs_code == RSHIFT_EXPR) 5430 { 5431 name2 = gimple_assign_rhs1 (def_stmt); 5432 cst2 = gimple_assign_rhs2 (def_stmt); 5433 if (TREE_CODE (name2) == SSA_NAME 5434 && tree_fits_uhwi_p (cst2) 5435 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 5436 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) 5437 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))) 5438 && live_on_edge (e, name2) 5439 && !has_single_use (name2)) 5440 { 5441 mask = wi::mask (tree_to_uhwi (cst2), false, prec); 5442 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); 5443 } 5444 } 5445 if (val2 != NULL_TREE 5446 && TREE_CODE (val2) == INTEGER_CST 5447 && simple_cst_equal (fold_build2 (RSHIFT_EXPR, 5448 TREE_TYPE (val), 5449 val2, cst2), val)) 5450 { 5451 enum tree_code new_comp_code = comp_code; 5452 tree tmp, new_val; 5453 5454 tmp = name2; 5455 if (comp_code == EQ_EXPR || comp_code == NE_EXPR) 5456 { 5457 if (!TYPE_UNSIGNED (TREE_TYPE (val))) 5458 { 5459 tree type = build_nonstandard_integer_type (prec, 1); 5460 tmp = build1 (NOP_EXPR, type, name2); 5461 val2 = fold_convert (type, val2); 5462 } 5463 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); 5464 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); 5465 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; 5466 } 5467 else if (comp_code == LT_EXPR || comp_code == GE_EXPR) 5468 { 5469 wide_int minval 5470 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); 5471 new_val = val2; 5472 if (minval == new_val) 5473 new_val = NULL_TREE; 5474 } 5475 else 5476 { 5477 wide_int maxval 5478 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); 5479 mask |= val2; 5480 if (mask == maxval) 5481 new_val = NULL_TREE; 5482 else 5483 new_val = wide_int_to_tree (TREE_TYPE (val2), mask); 5484 } 5485 5486 if (new_val) 5487 { 5488 if (dump_file) 5489 { 5490 fprintf (dump_file, "Adding assert for "); 5491 print_generic_expr (dump_file, name2, 0); 5492 fprintf (dump_file, " from "); 5493 print_generic_expr (dump_file, tmp, 0); 5494 fprintf (dump_file, "\n"); 5495 } 5496 5497 register_new_assert_for (name2, tmp, new_comp_code, new_val, 5498 NULL, e, bsi); 5499 } 5500 } 5501 5502 /* Add asserts for NAME cmp CST and NAME being defined as 5503 NAME = NAME2 & CST2. 5504 5505 Extract CST2 from the and. 5506 5507 Also handle 5508 NAME = (unsigned) NAME2; 5509 casts where NAME's type is unsigned and has smaller precision 5510 than NAME2's type as if it was NAME = NAME2 & MASK. */ 5511 names[0] = NULL_TREE; 5512 names[1] = NULL_TREE; 5513 cst2 = NULL_TREE; 5514 if (rhs_code == BIT_AND_EXPR 5515 || (CONVERT_EXPR_CODE_P (rhs_code) 5516 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE 5517 && TYPE_UNSIGNED (TREE_TYPE (val)) 5518 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) 5519 > prec)) 5520 { 5521 name2 = gimple_assign_rhs1 (def_stmt); 5522 if (rhs_code == BIT_AND_EXPR) 5523 cst2 = gimple_assign_rhs2 (def_stmt); 5524 else 5525 { 5526 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val)); 5527 nprec = TYPE_PRECISION (TREE_TYPE (name2)); 5528 } 5529 if (TREE_CODE (name2) == SSA_NAME 5530 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 5531 && TREE_CODE (cst2) == INTEGER_CST 5532 && !integer_zerop (cst2) 5533 && (nprec > 1 5534 || TYPE_UNSIGNED (TREE_TYPE (val)))) 5535 { 5536 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2); 5537 if (gimple_assign_cast_p (def_stmt2)) 5538 { 5539 names[1] = gimple_assign_rhs1 (def_stmt2); 5540 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2)) 5541 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1])) 5542 || (TYPE_PRECISION (TREE_TYPE (name2)) 5543 != TYPE_PRECISION (TREE_TYPE (names[1]))) 5544 || !live_on_edge (e, names[1]) 5545 || has_single_use (names[1])) 5546 names[1] = NULL_TREE; 5547 } 5548 if (live_on_edge (e, name2) 5549 && !has_single_use (name2)) 5550 names[0] = name2; 5551 } 5552 } 5553 if (names[0] || names[1]) 5554 { 5555 wide_int minv, maxv, valv, cst2v; 5556 wide_int tem, sgnbit; 5557 bool valid_p = false, valn, cst2n; 5558 enum tree_code ccode = comp_code; 5559 5560 valv = wide_int::from (val, nprec, UNSIGNED); 5561 cst2v = wide_int::from (cst2, nprec, UNSIGNED); 5562 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val))); 5563 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val))); 5564 /* If CST2 doesn't have most significant bit set, 5565 but VAL is negative, we have comparison like 5566 if ((x & 0x123) > -4) (always true). Just give up. */ 5567 if (!cst2n && valn) 5568 ccode = ERROR_MARK; 5569 if (cst2n) 5570 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); 5571 else 5572 sgnbit = wi::zero (nprec); 5573 minv = valv & cst2v; 5574 switch (ccode) 5575 { 5576 case EQ_EXPR: 5577 /* Minimum unsigned value for equality is VAL & CST2 5578 (should be equal to VAL, otherwise we probably should 5579 have folded the comparison into false) and 5580 maximum unsigned value is VAL | ~CST2. */ 5581 maxv = valv | ~cst2v; 5582 valid_p = true; 5583 break; 5584 5585 case NE_EXPR: 5586 tem = valv | ~cst2v; 5587 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ 5588 if (valv == 0) 5589 { 5590 cst2n = false; 5591 sgnbit = wi::zero (nprec); 5592 goto gt_expr; 5593 } 5594 /* If (VAL | ~CST2) is all ones, handle it as 5595 (X & CST2) < VAL. */ 5596 if (tem == -1) 5597 { 5598 cst2n = false; 5599 valn = false; 5600 sgnbit = wi::zero (nprec); 5601 goto lt_expr; 5602 } 5603 if (!cst2n && wi::neg_p (cst2v)) 5604 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); 5605 if (sgnbit != 0) 5606 { 5607 if (valv == sgnbit) 5608 { 5609 cst2n = true; 5610 valn = true; 5611 goto gt_expr; 5612 } 5613 if (tem == wi::mask (nprec - 1, false, nprec)) 5614 { 5615 cst2n = true; 5616 goto lt_expr; 5617 } 5618 if (!cst2n) 5619 sgnbit = wi::zero (nprec); 5620 } 5621 break; 5622 5623 case GE_EXPR: 5624 /* Minimum unsigned value for >= if (VAL & CST2) == VAL 5625 is VAL and maximum unsigned value is ~0. For signed 5626 comparison, if CST2 doesn't have most significant bit 5627 set, handle it similarly. If CST2 has MSB set, 5628 the minimum is the same, and maximum is ~0U/2. */ 5629 if (minv != valv) 5630 { 5631 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to 5632 VAL. */ 5633 minv = masked_increment (valv, cst2v, sgnbit, nprec); 5634 if (minv == valv) 5635 break; 5636 } 5637 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); 5638 valid_p = true; 5639 break; 5640 5641 case GT_EXPR: 5642 gt_expr: 5643 /* Find out smallest MINV where MINV > VAL 5644 && (MINV & CST2) == MINV, if any. If VAL is signed and 5645 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */ 5646 minv = masked_increment (valv, cst2v, sgnbit, nprec); 5647 if (minv == valv) 5648 break; 5649 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); 5650 valid_p = true; 5651 break; 5652 5653 case LE_EXPR: 5654 /* Minimum unsigned value for <= is 0 and maximum 5655 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. 5656 Otherwise, find smallest VAL2 where VAL2 > VAL 5657 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 5658 as maximum. 5659 For signed comparison, if CST2 doesn't have most 5660 significant bit set, handle it similarly. If CST2 has 5661 MSB set, the maximum is the same and minimum is INT_MIN. */ 5662 if (minv == valv) 5663 maxv = valv; 5664 else 5665 { 5666 maxv = masked_increment (valv, cst2v, sgnbit, nprec); 5667 if (maxv == valv) 5668 break; 5669 maxv -= 1; 5670 } 5671 maxv |= ~cst2v; 5672 minv = sgnbit; 5673 valid_p = true; 5674 break; 5675 5676 case LT_EXPR: 5677 lt_expr: 5678 /* Minimum unsigned value for < is 0 and maximum 5679 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL. 5680 Otherwise, find smallest VAL2 where VAL2 > VAL 5681 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 5682 as maximum. 5683 For signed comparison, if CST2 doesn't have most 5684 significant bit set, handle it similarly. If CST2 has 5685 MSB set, the maximum is the same and minimum is INT_MIN. */ 5686 if (minv == valv) 5687 { 5688 if (valv == sgnbit) 5689 break; 5690 maxv = valv; 5691 } 5692 else 5693 { 5694 maxv = masked_increment (valv, cst2v, sgnbit, nprec); 5695 if (maxv == valv) 5696 break; 5697 } 5698 maxv -= 1; 5699 maxv |= ~cst2v; 5700 minv = sgnbit; 5701 valid_p = true; 5702 break; 5703 5704 default: 5705 break; 5706 } 5707 if (valid_p 5708 && (maxv - minv) != -1) 5709 { 5710 tree tmp, new_val, type; 5711 int i; 5712 5713 for (i = 0; i < 2; i++) 5714 if (names[i]) 5715 { 5716 wide_int maxv2 = maxv; 5717 tmp = names[i]; 5718 type = TREE_TYPE (names[i]); 5719 if (!TYPE_UNSIGNED (type)) 5720 { 5721 type = build_nonstandard_integer_type (nprec, 1); 5722 tmp = build1 (NOP_EXPR, type, names[i]); 5723 } 5724 if (minv != 0) 5725 { 5726 tmp = build2 (PLUS_EXPR, type, tmp, 5727 wide_int_to_tree (type, -minv)); 5728 maxv2 = maxv - minv; 5729 } 5730 new_val = wide_int_to_tree (type, maxv2); 5731 5732 if (dump_file) 5733 { 5734 fprintf (dump_file, "Adding assert for "); 5735 print_generic_expr (dump_file, names[i], 0); 5736 fprintf (dump_file, " from "); 5737 print_generic_expr (dump_file, tmp, 0); 5738 fprintf (dump_file, "\n"); 5739 } 5740 5741 register_new_assert_for (names[i], tmp, LE_EXPR, 5742 new_val, NULL, e, bsi); 5743 } 5744 } 5745 } 5746 } 5747} 5748 5749/* OP is an operand of a truth value expression which is known to have 5750 a particular value. Register any asserts for OP and for any 5751 operands in OP's defining statement. 5752 5753 If CODE is EQ_EXPR, then we want to register OP is zero (false), 5754 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ 5755 5756static void 5757register_edge_assert_for_1 (tree op, enum tree_code code, 5758 edge e, gimple_stmt_iterator bsi) 5759{ 5760 gimple op_def; 5761 tree val; 5762 enum tree_code rhs_code; 5763 5764 /* We only care about SSA_NAMEs. */ 5765 if (TREE_CODE (op) != SSA_NAME) 5766 return; 5767 5768 /* We know that OP will have a zero or nonzero value. If OP is used 5769 more than once go ahead and register an assert for OP. */ 5770 if (live_on_edge (e, op) 5771 && !has_single_use (op)) 5772 { 5773 val = build_int_cst (TREE_TYPE (op), 0); 5774 register_new_assert_for (op, op, code, val, NULL, e, bsi); 5775 } 5776 5777 /* Now look at how OP is set. If it's set from a comparison, 5778 a truth operation or some bit operations, then we may be able 5779 to register information about the operands of that assignment. */ 5780 op_def = SSA_NAME_DEF_STMT (op); 5781 if (gimple_code (op_def) != GIMPLE_ASSIGN) 5782 return; 5783 5784 rhs_code = gimple_assign_rhs_code (op_def); 5785 5786 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) 5787 { 5788 bool invert = (code == EQ_EXPR ? true : false); 5789 tree op0 = gimple_assign_rhs1 (op_def); 5790 tree op1 = gimple_assign_rhs2 (op_def); 5791 5792 if (TREE_CODE (op0) == SSA_NAME) 5793 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert); 5794 if (TREE_CODE (op1) == SSA_NAME) 5795 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert); 5796 } 5797 else if ((code == NE_EXPR 5798 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) 5799 || (code == EQ_EXPR 5800 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) 5801 { 5802 /* Recurse on each operand. */ 5803 tree op0 = gimple_assign_rhs1 (op_def); 5804 tree op1 = gimple_assign_rhs2 (op_def); 5805 if (TREE_CODE (op0) == SSA_NAME 5806 && has_single_use (op0)) 5807 register_edge_assert_for_1 (op0, code, e, bsi); 5808 if (TREE_CODE (op1) == SSA_NAME 5809 && has_single_use (op1)) 5810 register_edge_assert_for_1 (op1, code, e, bsi); 5811 } 5812 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR 5813 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) 5814 { 5815 /* Recurse, flipping CODE. */ 5816 code = invert_tree_comparison (code, false); 5817 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi); 5818 } 5819 else if (gimple_assign_rhs_code (op_def) == SSA_NAME) 5820 { 5821 /* Recurse through the copy. */ 5822 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi); 5823 } 5824 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) 5825 { 5826 /* Recurse through the type conversion, unless it is a narrowing 5827 conversion or conversion from non-integral type. */ 5828 tree rhs = gimple_assign_rhs1 (op_def); 5829 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs)) 5830 && (TYPE_PRECISION (TREE_TYPE (rhs)) 5831 <= TYPE_PRECISION (TREE_TYPE (op)))) 5832 register_edge_assert_for_1 (rhs, code, e, bsi); 5833 } 5834} 5835 5836/* Try to register an edge assertion for SSA name NAME on edge E for 5837 the condition COND contributing to the conditional jump pointed to by 5838 SI. */ 5839 5840static void 5841register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, 5842 enum tree_code cond_code, tree cond_op0, 5843 tree cond_op1) 5844{ 5845 tree val; 5846 enum tree_code comp_code; 5847 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; 5848 5849 /* Do not attempt to infer anything in names that flow through 5850 abnormal edges. */ 5851 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) 5852 return; 5853 5854 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 5855 cond_op0, cond_op1, 5856 is_else_edge, 5857 &comp_code, &val)) 5858 return; 5859 5860 /* Register ASSERT_EXPRs for name. */ 5861 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, 5862 cond_op1, is_else_edge); 5863 5864 5865 /* If COND is effectively an equality test of an SSA_NAME against 5866 the value zero or one, then we may be able to assert values 5867 for SSA_NAMEs which flow into COND. */ 5868 5869 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining 5870 statement of NAME we can assert both operands of the BIT_AND_EXPR 5871 have nonzero value. */ 5872 if (((comp_code == EQ_EXPR && integer_onep (val)) 5873 || (comp_code == NE_EXPR && integer_zerop (val)))) 5874 { 5875 gimple def_stmt = SSA_NAME_DEF_STMT (name); 5876 5877 if (is_gimple_assign (def_stmt) 5878 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) 5879 { 5880 tree op0 = gimple_assign_rhs1 (def_stmt); 5881 tree op1 = gimple_assign_rhs2 (def_stmt); 5882 register_edge_assert_for_1 (op0, NE_EXPR, e, si); 5883 register_edge_assert_for_1 (op1, NE_EXPR, e, si); 5884 } 5885 } 5886 5887 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining 5888 statement of NAME we can assert both operands of the BIT_IOR_EXPR 5889 have zero value. */ 5890 if (((comp_code == EQ_EXPR && integer_zerop (val)) 5891 || (comp_code == NE_EXPR && integer_onep (val)))) 5892 { 5893 gimple def_stmt = SSA_NAME_DEF_STMT (name); 5894 5895 /* For BIT_IOR_EXPR only if NAME == 0 both operands have 5896 necessarily zero value, or if type-precision is one. */ 5897 if (is_gimple_assign (def_stmt) 5898 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR 5899 && (TYPE_PRECISION (TREE_TYPE (name)) == 1 5900 || comp_code == EQ_EXPR))) 5901 { 5902 tree op0 = gimple_assign_rhs1 (def_stmt); 5903 tree op1 = gimple_assign_rhs2 (def_stmt); 5904 register_edge_assert_for_1 (op0, EQ_EXPR, e, si); 5905 register_edge_assert_for_1 (op1, EQ_EXPR, e, si); 5906 } 5907 } 5908} 5909 5910 5911/* Determine whether the outgoing edges of BB should receive an 5912 ASSERT_EXPR for each of the operands of BB's LAST statement. 5913 The last statement of BB must be a COND_EXPR. 5914 5915 If any of the sub-graphs rooted at BB have an interesting use of 5916 the predicate operands, an assert location node is added to the 5917 list of assertions for the corresponding operands. */ 5918 5919static void 5920find_conditional_asserts (basic_block bb, gcond *last) 5921{ 5922 gimple_stmt_iterator bsi; 5923 tree op; 5924 edge_iterator ei; 5925 edge e; 5926 ssa_op_iter iter; 5927 5928 bsi = gsi_for_stmt (last); 5929 5930 /* Look for uses of the operands in each of the sub-graphs 5931 rooted at BB. We need to check each of the outgoing edges 5932 separately, so that we know what kind of ASSERT_EXPR to 5933 insert. */ 5934 FOR_EACH_EDGE (e, ei, bb->succs) 5935 { 5936 if (e->dest == bb) 5937 continue; 5938 5939 /* Register the necessary assertions for each operand in the 5940 conditional predicate. */ 5941 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) 5942 register_edge_assert_for (op, e, bsi, 5943 gimple_cond_code (last), 5944 gimple_cond_lhs (last), 5945 gimple_cond_rhs (last)); 5946 } 5947} 5948 5949struct case_info 5950{ 5951 tree expr; 5952 basic_block bb; 5953}; 5954 5955/* Compare two case labels sorting first by the destination bb index 5956 and then by the case value. */ 5957 5958static int 5959compare_case_labels (const void *p1, const void *p2) 5960{ 5961 const struct case_info *ci1 = (const struct case_info *) p1; 5962 const struct case_info *ci2 = (const struct case_info *) p2; 5963 int idx1 = ci1->bb->index; 5964 int idx2 = ci2->bb->index; 5965 5966 if (idx1 < idx2) 5967 return -1; 5968 else if (idx1 == idx2) 5969 { 5970 /* Make sure the default label is first in a group. */ 5971 if (!CASE_LOW (ci1->expr)) 5972 return -1; 5973 else if (!CASE_LOW (ci2->expr)) 5974 return 1; 5975 else 5976 return tree_int_cst_compare (CASE_LOW (ci1->expr), 5977 CASE_LOW (ci2->expr)); 5978 } 5979 else 5980 return 1; 5981} 5982 5983/* Determine whether the outgoing edges of BB should receive an 5984 ASSERT_EXPR for each of the operands of BB's LAST statement. 5985 The last statement of BB must be a SWITCH_EXPR. 5986 5987 If any of the sub-graphs rooted at BB have an interesting use of 5988 the predicate operands, an assert location node is added to the 5989 list of assertions for the corresponding operands. */ 5990 5991static void 5992find_switch_asserts (basic_block bb, gswitch *last) 5993{ 5994 gimple_stmt_iterator bsi; 5995 tree op; 5996 edge e; 5997 struct case_info *ci; 5998 size_t n = gimple_switch_num_labels (last); 5999#if GCC_VERSION >= 4000 6000 unsigned int idx; 6001#else 6002 /* Work around GCC 3.4 bug (PR 37086). */ 6003 volatile unsigned int idx; 6004#endif 6005 6006 bsi = gsi_for_stmt (last); 6007 op = gimple_switch_index (last); 6008 if (TREE_CODE (op) != SSA_NAME) 6009 return; 6010 6011 /* Build a vector of case labels sorted by destination label. */ 6012 ci = XNEWVEC (struct case_info, n); 6013 for (idx = 0; idx < n; ++idx) 6014 { 6015 ci[idx].expr = gimple_switch_label (last, idx); 6016 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); 6017 } 6018 qsort (ci, n, sizeof (struct case_info), compare_case_labels); 6019 6020 for (idx = 0; idx < n; ++idx) 6021 { 6022 tree min, max; 6023 tree cl = ci[idx].expr; 6024 basic_block cbb = ci[idx].bb; 6025 6026 min = CASE_LOW (cl); 6027 max = CASE_HIGH (cl); 6028 6029 /* If there are multiple case labels with the same destination 6030 we need to combine them to a single value range for the edge. */ 6031 if (idx + 1 < n && cbb == ci[idx + 1].bb) 6032 { 6033 /* Skip labels until the last of the group. */ 6034 do { 6035 ++idx; 6036 } while (idx < n && cbb == ci[idx].bb); 6037 --idx; 6038 6039 /* Pick up the maximum of the case label range. */ 6040 if (CASE_HIGH (ci[idx].expr)) 6041 max = CASE_HIGH (ci[idx].expr); 6042 else 6043 max = CASE_LOW (ci[idx].expr); 6044 } 6045 6046 /* Nothing to do if the range includes the default label until we 6047 can register anti-ranges. */ 6048 if (min == NULL_TREE) 6049 continue; 6050 6051 /* Find the edge to register the assert expr on. */ 6052 e = find_edge (bb, cbb); 6053 6054 /* Register the necessary assertions for the operand in the 6055 SWITCH_EXPR. */ 6056 register_edge_assert_for (op, e, bsi, 6057 max ? GE_EXPR : EQ_EXPR, 6058 op, fold_convert (TREE_TYPE (op), min)); 6059 if (max) 6060 register_edge_assert_for (op, e, bsi, LE_EXPR, op, 6061 fold_convert (TREE_TYPE (op), max)); 6062 } 6063 6064 XDELETEVEC (ci); 6065} 6066 6067 6068/* Traverse all the statements in block BB looking for statements that 6069 may generate useful assertions for the SSA names in their operand. 6070 If a statement produces a useful assertion A for name N_i, then the 6071 list of assertions already generated for N_i is scanned to 6072 determine if A is actually needed. 6073 6074 If N_i already had the assertion A at a location dominating the 6075 current location, then nothing needs to be done. Otherwise, the 6076 new location for A is recorded instead. 6077 6078 1- For every statement S in BB, all the variables used by S are 6079 added to bitmap FOUND_IN_SUBGRAPH. 6080 6081 2- If statement S uses an operand N in a way that exposes a known 6082 value range for N, then if N was not already generated by an 6083 ASSERT_EXPR, create a new assert location for N. For instance, 6084 if N is a pointer and the statement dereferences it, we can 6085 assume that N is not NULL. 6086 6087 3- COND_EXPRs are a special case of #2. We can derive range 6088 information from the predicate but need to insert different 6089 ASSERT_EXPRs for each of the sub-graphs rooted at the 6090 conditional block. If the last statement of BB is a conditional 6091 expression of the form 'X op Y', then 6092 6093 a) Remove X and Y from the set FOUND_IN_SUBGRAPH. 6094 6095 b) If the conditional is the only entry point to the sub-graph 6096 corresponding to the THEN_CLAUSE, recurse into it. On 6097 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then 6098 an ASSERT_EXPR is added for the corresponding variable. 6099 6100 c) Repeat step (b) on the ELSE_CLAUSE. 6101 6102 d) Mark X and Y in FOUND_IN_SUBGRAPH. 6103 6104 For instance, 6105 6106 if (a == 9) 6107 b = a; 6108 else 6109 b = c + 1; 6110 6111 In this case, an assertion on the THEN clause is useful to 6112 determine that 'a' is always 9 on that edge. However, an assertion 6113 on the ELSE clause would be unnecessary. 6114 6115 4- If BB does not end in a conditional expression, then we recurse 6116 into BB's dominator children. 6117 6118 At the end of the recursive traversal, every SSA name will have a 6119 list of locations where ASSERT_EXPRs should be added. When a new 6120 location for name N is found, it is registered by calling 6121 register_new_assert_for. That function keeps track of all the 6122 registered assertions to prevent adding unnecessary assertions. 6123 For instance, if a pointer P_4 is dereferenced more than once in a 6124 dominator tree, only the location dominating all the dereference of 6125 P_4 will receive an ASSERT_EXPR. */ 6126 6127static void 6128find_assert_locations_1 (basic_block bb, sbitmap live) 6129{ 6130 gimple last; 6131 6132 last = last_stmt (bb); 6133 6134 /* If BB's last statement is a conditional statement involving integer 6135 operands, determine if we need to add ASSERT_EXPRs. */ 6136 if (last 6137 && gimple_code (last) == GIMPLE_COND 6138 && !fp_predicate (last) 6139 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 6140 find_conditional_asserts (bb, as_a <gcond *> (last)); 6141 6142 /* If BB's last statement is a switch statement involving integer 6143 operands, determine if we need to add ASSERT_EXPRs. */ 6144 if (last 6145 && gimple_code (last) == GIMPLE_SWITCH 6146 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 6147 find_switch_asserts (bb, as_a <gswitch *> (last)); 6148 6149 /* Traverse all the statements in BB marking used names and looking 6150 for statements that may infer assertions for their used operands. */ 6151 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si); 6152 gsi_prev (&si)) 6153 { 6154 gimple stmt; 6155 tree op; 6156 ssa_op_iter i; 6157 6158 stmt = gsi_stmt (si); 6159 6160 if (is_gimple_debug (stmt)) 6161 continue; 6162 6163 /* See if we can derive an assertion for any of STMT's operands. */ 6164 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) 6165 { 6166 tree value; 6167 enum tree_code comp_code; 6168 6169 /* If op is not live beyond this stmt, do not bother to insert 6170 asserts for it. */ 6171 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op))) 6172 continue; 6173 6174 /* If OP is used in such a way that we can infer a value 6175 range for it, and we don't find a previous assertion for 6176 it, create a new assertion location node for OP. */ 6177 if (infer_value_range (stmt, op, &comp_code, &value)) 6178 { 6179 /* If we are able to infer a nonzero value range for OP, 6180 then walk backwards through the use-def chain to see if OP 6181 was set via a typecast. 6182 6183 If so, then we can also infer a nonzero value range 6184 for the operand of the NOP_EXPR. */ 6185 if (comp_code == NE_EXPR && integer_zerop (value)) 6186 { 6187 tree t = op; 6188 gimple def_stmt = SSA_NAME_DEF_STMT (t); 6189 6190 while (is_gimple_assign (def_stmt) 6191 && CONVERT_EXPR_CODE_P 6192 (gimple_assign_rhs_code (def_stmt)) 6193 && TREE_CODE 6194 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME 6195 && POINTER_TYPE_P 6196 (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) 6197 { 6198 t = gimple_assign_rhs1 (def_stmt); 6199 def_stmt = SSA_NAME_DEF_STMT (t); 6200 6201 /* Note we want to register the assert for the 6202 operand of the NOP_EXPR after SI, not after the 6203 conversion. */ 6204 if (! has_single_use (t)) 6205 register_new_assert_for (t, t, comp_code, value, 6206 bb, NULL, si); 6207 } 6208 } 6209 6210 register_new_assert_for (op, op, comp_code, value, bb, NULL, si); 6211 } 6212 } 6213 6214 /* Update live. */ 6215 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) 6216 bitmap_set_bit (live, SSA_NAME_VERSION (op)); 6217 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF) 6218 bitmap_clear_bit (live, SSA_NAME_VERSION (op)); 6219 } 6220 6221 /* Traverse all PHI nodes in BB, updating live. */ 6222 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); 6223 gsi_next (&si)) 6224 { 6225 use_operand_p arg_p; 6226 ssa_op_iter i; 6227 gphi *phi = si.phi (); 6228 tree res = gimple_phi_result (phi); 6229 6230 if (virtual_operand_p (res)) 6231 continue; 6232 6233 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) 6234 { 6235 tree arg = USE_FROM_PTR (arg_p); 6236 if (TREE_CODE (arg) == SSA_NAME) 6237 bitmap_set_bit (live, SSA_NAME_VERSION (arg)); 6238 } 6239 6240 bitmap_clear_bit (live, SSA_NAME_VERSION (res)); 6241 } 6242} 6243 6244/* Do an RPO walk over the function computing SSA name liveness 6245 on-the-fly and deciding on assert expressions to insert. */ 6246 6247static void 6248find_assert_locations (void) 6249{ 6250 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun)); 6251 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun)); 6252 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun)); 6253 int rpo_cnt, i; 6254 6255 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun)); 6256 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); 6257 for (i = 0; i < rpo_cnt; ++i) 6258 bb_rpo[rpo[i]] = i; 6259 6260 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to 6261 the order we compute liveness and insert asserts we otherwise 6262 fail to insert asserts into the loop latch. */ 6263 loop_p loop; 6264 FOR_EACH_LOOP (loop, 0) 6265 { 6266 i = loop->latch->index; 6267 unsigned int j = single_succ_edge (loop->latch)->dest_idx; 6268 for (gphi_iterator gsi = gsi_start_phis (loop->header); 6269 !gsi_end_p (gsi); gsi_next (&gsi)) 6270 { 6271 gphi *phi = gsi.phi (); 6272 if (virtual_operand_p (gimple_phi_result (phi))) 6273 continue; 6274 tree arg = gimple_phi_arg_def (phi, j); 6275 if (TREE_CODE (arg) == SSA_NAME) 6276 { 6277 if (live[i] == NULL) 6278 { 6279 live[i] = sbitmap_alloc (num_ssa_names); 6280 bitmap_clear (live[i]); 6281 } 6282 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg)); 6283 } 6284 } 6285 } 6286 6287 for (i = rpo_cnt - 1; i >= 0; --i) 6288 { 6289 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); 6290 edge e; 6291 edge_iterator ei; 6292 6293 if (!live[rpo[i]]) 6294 { 6295 live[rpo[i]] = sbitmap_alloc (num_ssa_names); 6296 bitmap_clear (live[rpo[i]]); 6297 } 6298 6299 /* Process BB and update the live information with uses in 6300 this block. */ 6301 find_assert_locations_1 (bb, live[rpo[i]]); 6302 6303 /* Merge liveness into the predecessor blocks and free it. */ 6304 if (!bitmap_empty_p (live[rpo[i]])) 6305 { 6306 int pred_rpo = i; 6307 FOR_EACH_EDGE (e, ei, bb->preds) 6308 { 6309 int pred = e->src->index; 6310 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK) 6311 continue; 6312 6313 if (!live[pred]) 6314 { 6315 live[pred] = sbitmap_alloc (num_ssa_names); 6316 bitmap_clear (live[pred]); 6317 } 6318 bitmap_ior (live[pred], live[pred], live[rpo[i]]); 6319 6320 if (bb_rpo[pred] < pred_rpo) 6321 pred_rpo = bb_rpo[pred]; 6322 } 6323 6324 /* Record the RPO number of the last visited block that needs 6325 live information from this block. */ 6326 last_rpo[rpo[i]] = pred_rpo; 6327 } 6328 else 6329 { 6330 sbitmap_free (live[rpo[i]]); 6331 live[rpo[i]] = NULL; 6332 } 6333 6334 /* We can free all successors live bitmaps if all their 6335 predecessors have been visited already. */ 6336 FOR_EACH_EDGE (e, ei, bb->succs) 6337 if (last_rpo[e->dest->index] == i 6338 && live[e->dest->index]) 6339 { 6340 sbitmap_free (live[e->dest->index]); 6341 live[e->dest->index] = NULL; 6342 } 6343 } 6344 6345 XDELETEVEC (rpo); 6346 XDELETEVEC (bb_rpo); 6347 XDELETEVEC (last_rpo); 6348 for (i = 0; i < last_basic_block_for_fn (cfun); ++i) 6349 if (live[i]) 6350 sbitmap_free (live[i]); 6351 XDELETEVEC (live); 6352} 6353 6354/* Create an ASSERT_EXPR for NAME and insert it in the location 6355 indicated by LOC. Return true if we made any edge insertions. */ 6356 6357static bool 6358process_assert_insertions_for (tree name, assert_locus_t loc) 6359{ 6360 /* Build the comparison expression NAME_i COMP_CODE VAL. */ 6361 gimple stmt; 6362 tree cond; 6363 gimple assert_stmt; 6364 edge_iterator ei; 6365 edge e; 6366 6367 /* If we have X <=> X do not insert an assert expr for that. */ 6368 if (loc->expr == loc->val) 6369 return false; 6370 6371 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); 6372 assert_stmt = build_assert_expr_for (cond, name); 6373 if (loc->e) 6374 { 6375 /* We have been asked to insert the assertion on an edge. This 6376 is used only by COND_EXPR and SWITCH_EXPR assertions. */ 6377 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND 6378 || (gimple_code (gsi_stmt (loc->si)) 6379 == GIMPLE_SWITCH)); 6380 6381 gsi_insert_on_edge (loc->e, assert_stmt); 6382 return true; 6383 } 6384 6385 /* Otherwise, we can insert right after LOC->SI iff the 6386 statement must not be the last statement in the block. */ 6387 stmt = gsi_stmt (loc->si); 6388 if (!stmt_ends_bb_p (stmt)) 6389 { 6390 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); 6391 return false; 6392 } 6393 6394 /* If STMT must be the last statement in BB, we can only insert new 6395 assertions on the non-abnormal edge out of BB. Note that since 6396 STMT is not control flow, there may only be one non-abnormal edge 6397 out of BB. */ 6398 FOR_EACH_EDGE (e, ei, loc->bb->succs) 6399 if (!(e->flags & EDGE_ABNORMAL)) 6400 { 6401 gsi_insert_on_edge (e, assert_stmt); 6402 return true; 6403 } 6404 6405 gcc_unreachable (); 6406} 6407 6408 6409/* Process all the insertions registered for every name N_i registered 6410 in NEED_ASSERT_FOR. The list of assertions to be inserted are 6411 found in ASSERTS_FOR[i]. */ 6412 6413static void 6414process_assert_insertions (void) 6415{ 6416 unsigned i; 6417 bitmap_iterator bi; 6418 bool update_edges_p = false; 6419 int num_asserts = 0; 6420 6421 if (dump_file && (dump_flags & TDF_DETAILS)) 6422 dump_all_asserts (dump_file); 6423 6424 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 6425 { 6426 assert_locus_t loc = asserts_for[i]; 6427 gcc_assert (loc); 6428 6429 while (loc) 6430 { 6431 assert_locus_t next = loc->next; 6432 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); 6433 free (loc); 6434 loc = next; 6435 num_asserts++; 6436 } 6437 } 6438 6439 if (update_edges_p) 6440 gsi_commit_edge_inserts (); 6441 6442 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", 6443 num_asserts); 6444} 6445 6446 6447/* Traverse the flowgraph looking for conditional jumps to insert range 6448 expressions. These range expressions are meant to provide information 6449 to optimizations that need to reason in terms of value ranges. They 6450 will not be expanded into RTL. For instance, given: 6451 6452 x = ... 6453 y = ... 6454 if (x < y) 6455 y = x - 2; 6456 else 6457 x = y + 3; 6458 6459 this pass will transform the code into: 6460 6461 x = ... 6462 y = ... 6463 if (x < y) 6464 { 6465 x = ASSERT_EXPR <x, x < y> 6466 y = x - 2 6467 } 6468 else 6469 { 6470 y = ASSERT_EXPR <y, x >= y> 6471 x = y + 3 6472 } 6473 6474 The idea is that once copy and constant propagation have run, other 6475 optimizations will be able to determine what ranges of values can 'x' 6476 take in different paths of the code, simply by checking the reaching 6477 definition of 'x'. */ 6478 6479static void 6480insert_range_assertions (void) 6481{ 6482 need_assert_for = BITMAP_ALLOC (NULL); 6483 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names); 6484 6485 calculate_dominance_info (CDI_DOMINATORS); 6486 6487 find_assert_locations (); 6488 if (!bitmap_empty_p (need_assert_for)) 6489 { 6490 process_assert_insertions (); 6491 update_ssa (TODO_update_ssa_no_phi); 6492 } 6493 6494 if (dump_file && (dump_flags & TDF_DETAILS)) 6495 { 6496 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n"); 6497 dump_function_to_file (current_function_decl, dump_file, dump_flags); 6498 } 6499 6500 free (asserts_for); 6501 BITMAP_FREE (need_assert_for); 6502} 6503 6504/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays 6505 and "struct" hacks. If VRP can determine that the 6506 array subscript is a constant, check if it is outside valid 6507 range. If the array subscript is a RANGE, warn if it is 6508 non-overlapping with valid range. 6509 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */ 6510 6511static void 6512check_array_ref (location_t location, tree ref, bool ignore_off_by_one) 6513{ 6514 value_range_t* vr = NULL; 6515 tree low_sub, up_sub; 6516 tree low_bound, up_bound, up_bound_p1; 6517 tree base; 6518 6519 if (TREE_NO_WARNING (ref)) 6520 return; 6521 6522 low_sub = up_sub = TREE_OPERAND (ref, 1); 6523 up_bound = array_ref_up_bound (ref); 6524 6525 /* Can not check flexible arrays. */ 6526 if (!up_bound 6527 || TREE_CODE (up_bound) != INTEGER_CST) 6528 return; 6529 6530 /* Accesses to trailing arrays via pointers may access storage 6531 beyond the types array bounds. */ 6532 base = get_base_address (ref); 6533 if ((warn_array_bounds < 2) 6534 && base && TREE_CODE (base) == MEM_REF) 6535 { 6536 tree cref, next = NULL_TREE; 6537 6538 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF) 6539 return; 6540 6541 cref = TREE_OPERAND (ref, 0); 6542 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE) 6543 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1)); 6544 next && TREE_CODE (next) != FIELD_DECL; 6545 next = DECL_CHAIN (next)) 6546 ; 6547 6548 /* If this is the last field in a struct type or a field in a 6549 union type do not warn. */ 6550 if (!next) 6551 return; 6552 } 6553 6554 low_bound = array_ref_low_bound (ref); 6555 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, 6556 build_int_cst (TREE_TYPE (up_bound), 1)); 6557 6558 if (TREE_CODE (low_sub) == SSA_NAME) 6559 { 6560 vr = get_value_range (low_sub); 6561 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 6562 { 6563 low_sub = vr->type == VR_RANGE ? vr->max : vr->min; 6564 up_sub = vr->type == VR_RANGE ? vr->min : vr->max; 6565 } 6566 } 6567 6568 if (vr && vr->type == VR_ANTI_RANGE) 6569 { 6570 if (TREE_CODE (up_sub) == INTEGER_CST 6571 && tree_int_cst_lt (up_bound, up_sub) 6572 && TREE_CODE (low_sub) == INTEGER_CST 6573 && tree_int_cst_lt (low_sub, low_bound)) 6574 { 6575 warning_at (location, OPT_Warray_bounds, 6576 "array subscript is outside array bounds"); 6577 TREE_NO_WARNING (ref) = 1; 6578 } 6579 } 6580 else if (TREE_CODE (up_sub) == INTEGER_CST 6581 && (ignore_off_by_one 6582 ? (tree_int_cst_lt (up_bound, up_sub) 6583 && !tree_int_cst_equal (up_bound_p1, up_sub)) 6584 : (tree_int_cst_lt (up_bound, up_sub) 6585 || tree_int_cst_equal (up_bound_p1, up_sub)))) 6586 { 6587 if (dump_file && (dump_flags & TDF_DETAILS)) 6588 { 6589 fprintf (dump_file, "Array bound warning for "); 6590 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); 6591 fprintf (dump_file, "\n"); 6592 } 6593 warning_at (location, OPT_Warray_bounds, 6594 "array subscript is above array bounds"); 6595 TREE_NO_WARNING (ref) = 1; 6596 } 6597 else if (TREE_CODE (low_sub) == INTEGER_CST 6598 && tree_int_cst_lt (low_sub, low_bound)) 6599 { 6600 if (dump_file && (dump_flags & TDF_DETAILS)) 6601 { 6602 fprintf (dump_file, "Array bound warning for "); 6603 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); 6604 fprintf (dump_file, "\n"); 6605 } 6606 warning_at (location, OPT_Warray_bounds, 6607 "array subscript is below array bounds"); 6608 TREE_NO_WARNING (ref) = 1; 6609 } 6610} 6611 6612/* Searches if the expr T, located at LOCATION computes 6613 address of an ARRAY_REF, and call check_array_ref on it. */ 6614 6615static void 6616search_for_addr_array (tree t, location_t location) 6617{ 6618 while (TREE_CODE (t) == SSA_NAME) 6619 { 6620 gimple g = SSA_NAME_DEF_STMT (t); 6621 6622 if (gimple_code (g) != GIMPLE_ASSIGN) 6623 return; 6624 6625 if (get_gimple_rhs_class (gimple_assign_rhs_code (g)) 6626 != GIMPLE_SINGLE_RHS) 6627 return; 6628 6629 t = gimple_assign_rhs1 (g); 6630 } 6631 6632 6633 /* We are only interested in addresses of ARRAY_REF's. */ 6634 if (TREE_CODE (t) != ADDR_EXPR) 6635 return; 6636 6637 /* Check each ARRAY_REFs in the reference chain. */ 6638 do 6639 { 6640 if (TREE_CODE (t) == ARRAY_REF) 6641 check_array_ref (location, t, true /*ignore_off_by_one*/); 6642 6643 t = TREE_OPERAND (t, 0); 6644 } 6645 while (handled_component_p (t)); 6646 6647 if (TREE_CODE (t) == MEM_REF 6648 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR 6649 && !TREE_NO_WARNING (t)) 6650 { 6651 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); 6652 tree low_bound, up_bound, el_sz; 6653 offset_int idx; 6654 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE 6655 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE 6656 || !TYPE_DOMAIN (TREE_TYPE (tem))) 6657 return; 6658 6659 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); 6660 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); 6661 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem))); 6662 if (!low_bound 6663 || TREE_CODE (low_bound) != INTEGER_CST 6664 || !up_bound 6665 || TREE_CODE (up_bound) != INTEGER_CST 6666 || !el_sz 6667 || TREE_CODE (el_sz) != INTEGER_CST) 6668 return; 6669 6670 idx = mem_ref_offset (t); 6671 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz)); 6672 if (wi::lts_p (idx, 0)) 6673 { 6674 if (dump_file && (dump_flags & TDF_DETAILS)) 6675 { 6676 fprintf (dump_file, "Array bound warning for "); 6677 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); 6678 fprintf (dump_file, "\n"); 6679 } 6680 warning_at (location, OPT_Warray_bounds, 6681 "array subscript is below array bounds"); 6682 TREE_NO_WARNING (t) = 1; 6683 } 6684 else if (wi::gts_p (idx, (wi::to_offset (up_bound) 6685 - wi::to_offset (low_bound) + 1))) 6686 { 6687 if (dump_file && (dump_flags & TDF_DETAILS)) 6688 { 6689 fprintf (dump_file, "Array bound warning for "); 6690 dump_generic_expr (MSG_NOTE, TDF_SLIM, t); 6691 fprintf (dump_file, "\n"); 6692 } 6693 warning_at (location, OPT_Warray_bounds, 6694 "array subscript is above array bounds"); 6695 TREE_NO_WARNING (t) = 1; 6696 } 6697 } 6698} 6699 6700/* walk_tree() callback that checks if *TP is 6701 an ARRAY_REF inside an ADDR_EXPR (in which an array 6702 subscript one outside the valid range is allowed). Call 6703 check_array_ref for each ARRAY_REF found. The location is 6704 passed in DATA. */ 6705 6706static tree 6707check_array_bounds (tree *tp, int *walk_subtree, void *data) 6708{ 6709 tree t = *tp; 6710 struct walk_stmt_info *wi = (struct walk_stmt_info *) data; 6711 location_t location; 6712 6713 if (EXPR_HAS_LOCATION (t)) 6714 location = EXPR_LOCATION (t); 6715 else 6716 { 6717 location_t *locp = (location_t *) wi->info; 6718 location = *locp; 6719 } 6720 6721 *walk_subtree = TRUE; 6722 6723 if (TREE_CODE (t) == ARRAY_REF) 6724 check_array_ref (location, t, false /*ignore_off_by_one*/); 6725 6726 if (TREE_CODE (t) == MEM_REF 6727 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0))) 6728 search_for_addr_array (TREE_OPERAND (t, 0), location); 6729 6730 if (TREE_CODE (t) == ADDR_EXPR) 6731 *walk_subtree = FALSE; 6732 6733 return NULL_TREE; 6734} 6735 6736/* Walk over all statements of all reachable BBs and call check_array_bounds 6737 on them. */ 6738 6739static void 6740check_all_array_refs (void) 6741{ 6742 basic_block bb; 6743 gimple_stmt_iterator si; 6744 6745 FOR_EACH_BB_FN (bb, cfun) 6746 { 6747 edge_iterator ei; 6748 edge e; 6749 bool executable = false; 6750 6751 /* Skip blocks that were found to be unreachable. */ 6752 FOR_EACH_EDGE (e, ei, bb->preds) 6753 executable |= !!(e->flags & EDGE_EXECUTABLE); 6754 if (!executable) 6755 continue; 6756 6757 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 6758 { 6759 gimple stmt = gsi_stmt (si); 6760 struct walk_stmt_info wi; 6761 if (!gimple_has_location (stmt)) 6762 continue; 6763 6764 if (is_gimple_call (stmt)) 6765 { 6766 size_t i; 6767 size_t n = gimple_call_num_args (stmt); 6768 for (i = 0; i < n; i++) 6769 { 6770 tree arg = gimple_call_arg (stmt, i); 6771 search_for_addr_array (arg, gimple_location (stmt)); 6772 } 6773 } 6774 else 6775 { 6776 memset (&wi, 0, sizeof (wi)); 6777 wi.info = CONST_CAST (void *, (const void *) 6778 gimple_location_ptr (stmt)); 6779 6780 walk_gimple_op (gsi_stmt (si), 6781 check_array_bounds, 6782 &wi); 6783 } 6784 } 6785 } 6786} 6787 6788/* Return true if all imm uses of VAR are either in STMT, or 6789 feed (optionally through a chain of single imm uses) GIMPLE_COND 6790 in basic block COND_BB. */ 6791 6792static bool 6793all_imm_uses_in_stmt_or_feed_cond (tree var, gimple stmt, basic_block cond_bb) 6794{ 6795 use_operand_p use_p, use2_p; 6796 imm_use_iterator iter; 6797 6798 FOR_EACH_IMM_USE_FAST (use_p, iter, var) 6799 if (USE_STMT (use_p) != stmt) 6800 { 6801 gimple use_stmt = USE_STMT (use_p), use_stmt2; 6802 if (is_gimple_debug (use_stmt)) 6803 continue; 6804 while (is_gimple_assign (use_stmt) 6805 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME 6806 && single_imm_use (gimple_assign_lhs (use_stmt), 6807 &use2_p, &use_stmt2)) 6808 use_stmt = use_stmt2; 6809 if (gimple_code (use_stmt) != GIMPLE_COND 6810 || gimple_bb (use_stmt) != cond_bb) 6811 return false; 6812 } 6813 return true; 6814} 6815 6816/* Handle 6817 _4 = x_3 & 31; 6818 if (_4 != 0) 6819 goto <bb 6>; 6820 else 6821 goto <bb 7>; 6822 <bb 6>: 6823 __builtin_unreachable (); 6824 <bb 7>: 6825 x_5 = ASSERT_EXPR <x_3, ...>; 6826 If x_3 has no other immediate uses (checked by caller), 6827 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits 6828 from the non-zero bitmask. */ 6829 6830static void 6831maybe_set_nonzero_bits (basic_block bb, tree var) 6832{ 6833 edge e = single_pred_edge (bb); 6834 basic_block cond_bb = e->src; 6835 gimple stmt = last_stmt (cond_bb); 6836 tree cst; 6837 6838 if (stmt == NULL 6839 || gimple_code (stmt) != GIMPLE_COND 6840 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE) 6841 ? EQ_EXPR : NE_EXPR) 6842 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME 6843 || !integer_zerop (gimple_cond_rhs (stmt))) 6844 return; 6845 6846 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt)); 6847 if (!is_gimple_assign (stmt) 6848 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR 6849 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST) 6850 return; 6851 if (gimple_assign_rhs1 (stmt) != var) 6852 { 6853 gimple stmt2; 6854 6855 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME) 6856 return; 6857 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); 6858 if (!gimple_assign_cast_p (stmt2) 6859 || gimple_assign_rhs1 (stmt2) != var 6860 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2)) 6861 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))) 6862 != TYPE_PRECISION (TREE_TYPE (var)))) 6863 return; 6864 } 6865 cst = gimple_assign_rhs2 (stmt); 6866 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst)); 6867} 6868 6869/* Convert range assertion expressions into the implied copies and 6870 copy propagate away the copies. Doing the trivial copy propagation 6871 here avoids the need to run the full copy propagation pass after 6872 VRP. 6873 6874 FIXME, this will eventually lead to copy propagation removing the 6875 names that had useful range information attached to them. For 6876 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>, 6877 then N_i will have the range [3, +INF]. 6878 6879 However, by converting the assertion into the implied copy 6880 operation N_i = N_j, we will then copy-propagate N_j into the uses 6881 of N_i and lose the range information. We may want to hold on to 6882 ASSERT_EXPRs a little while longer as the ranges could be used in 6883 things like jump threading. 6884 6885 The problem with keeping ASSERT_EXPRs around is that passes after 6886 VRP need to handle them appropriately. 6887 6888 Another approach would be to make the range information a first 6889 class property of the SSA_NAME so that it can be queried from 6890 any pass. This is made somewhat more complex by the need for 6891 multiple ranges to be associated with one SSA_NAME. */ 6892 6893static void 6894remove_range_assertions (void) 6895{ 6896 basic_block bb; 6897 gimple_stmt_iterator si; 6898 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of 6899 a basic block preceeded by GIMPLE_COND branching to it and 6900 __builtin_trap, -1 if not yet checked, 0 otherwise. */ 6901 int is_unreachable; 6902 6903 /* Note that the BSI iterator bump happens at the bottom of the 6904 loop and no bump is necessary if we're removing the statement 6905 referenced by the current BSI. */ 6906 FOR_EACH_BB_FN (bb, cfun) 6907 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);) 6908 { 6909 gimple stmt = gsi_stmt (si); 6910 gimple use_stmt; 6911 6912 if (is_gimple_assign (stmt) 6913 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR) 6914 { 6915 tree lhs = gimple_assign_lhs (stmt); 6916 tree rhs = gimple_assign_rhs1 (stmt); 6917 tree var; 6918 tree cond = fold (ASSERT_EXPR_COND (rhs)); 6919 use_operand_p use_p; 6920 imm_use_iterator iter; 6921 6922 gcc_assert (cond != boolean_false_node); 6923 6924 var = ASSERT_EXPR_VAR (rhs); 6925 gcc_assert (TREE_CODE (var) == SSA_NAME); 6926 6927 if (!POINTER_TYPE_P (TREE_TYPE (lhs)) 6928 && SSA_NAME_RANGE_INFO (lhs)) 6929 { 6930 if (is_unreachable == -1) 6931 { 6932 is_unreachable = 0; 6933 if (single_pred_p (bb) 6934 && assert_unreachable_fallthru_edge_p 6935 (single_pred_edge (bb))) 6936 is_unreachable = 1; 6937 } 6938 /* Handle 6939 if (x_7 >= 10 && x_7 < 20) 6940 __builtin_unreachable (); 6941 x_8 = ASSERT_EXPR <x_7, ...>; 6942 if the only uses of x_7 are in the ASSERT_EXPR and 6943 in the condition. In that case, we can copy the 6944 range info from x_8 computed in this pass also 6945 for x_7. */ 6946 if (is_unreachable 6947 && all_imm_uses_in_stmt_or_feed_cond (var, stmt, 6948 single_pred (bb))) 6949 { 6950 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs), 6951 SSA_NAME_RANGE_INFO (lhs)->get_min (), 6952 SSA_NAME_RANGE_INFO (lhs)->get_max ()); 6953 maybe_set_nonzero_bits (bb, var); 6954 } 6955 } 6956 6957 /* Propagate the RHS into every use of the LHS. */ 6958 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) 6959 FOR_EACH_IMM_USE_ON_STMT (use_p, iter) 6960 SET_USE (use_p, var); 6961 6962 /* And finally, remove the copy, it is not needed. */ 6963 gsi_remove (&si, true); 6964 release_defs (stmt); 6965 } 6966 else 6967 { 6968 if (!is_gimple_debug (gsi_stmt (si))) 6969 is_unreachable = 0; 6970 gsi_next (&si); 6971 } 6972 } 6973} 6974 6975 6976/* Return true if STMT is interesting for VRP. */ 6977 6978static bool 6979stmt_interesting_for_vrp (gimple stmt) 6980{ 6981 if (gimple_code (stmt) == GIMPLE_PHI) 6982 { 6983 tree res = gimple_phi_result (stmt); 6984 return (!virtual_operand_p (res) 6985 && (INTEGRAL_TYPE_P (TREE_TYPE (res)) 6986 || POINTER_TYPE_P (TREE_TYPE (res)))); 6987 } 6988 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 6989 { 6990 tree lhs = gimple_get_lhs (stmt); 6991 6992 /* In general, assignments with virtual operands are not useful 6993 for deriving ranges, with the obvious exception of calls to 6994 builtin functions. */ 6995 if (lhs && TREE_CODE (lhs) == SSA_NAME 6996 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 6997 || POINTER_TYPE_P (TREE_TYPE (lhs))) 6998 && (is_gimple_call (stmt) 6999 || !gimple_vuse (stmt))) 7000 return true; 7001 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)) 7002 switch (gimple_call_internal_fn (stmt)) 7003 { 7004 case IFN_ADD_OVERFLOW: 7005 case IFN_SUB_OVERFLOW: 7006 case IFN_MUL_OVERFLOW: 7007 /* These internal calls return _Complex integer type, 7008 but are interesting to VRP nevertheless. */ 7009 if (lhs && TREE_CODE (lhs) == SSA_NAME) 7010 return true; 7011 break; 7012 default: 7013 break; 7014 } 7015 } 7016 else if (gimple_code (stmt) == GIMPLE_COND 7017 || gimple_code (stmt) == GIMPLE_SWITCH) 7018 return true; 7019 7020 return false; 7021} 7022 7023 7024/* Initialize local data structures for VRP. */ 7025 7026static void 7027vrp_initialize (void) 7028{ 7029 basic_block bb; 7030 7031 values_propagated = false; 7032 num_vr_values = num_ssa_names; 7033 vr_value = XCNEWVEC (value_range_t *, num_vr_values); 7034 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names); 7035 7036 FOR_EACH_BB_FN (bb, cfun) 7037 { 7038 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); 7039 gsi_next (&si)) 7040 { 7041 gphi *phi = si.phi (); 7042 if (!stmt_interesting_for_vrp (phi)) 7043 { 7044 tree lhs = PHI_RESULT (phi); 7045 set_value_range_to_varying (get_value_range (lhs)); 7046 prop_set_simulate_again (phi, false); 7047 } 7048 else 7049 prop_set_simulate_again (phi, true); 7050 } 7051 7052 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); 7053 gsi_next (&si)) 7054 { 7055 gimple stmt = gsi_stmt (si); 7056 7057 /* If the statement is a control insn, then we do not 7058 want to avoid simulating the statement once. Failure 7059 to do so means that those edges will never get added. */ 7060 if (stmt_ends_bb_p (stmt)) 7061 prop_set_simulate_again (stmt, true); 7062 else if (!stmt_interesting_for_vrp (stmt)) 7063 { 7064 ssa_op_iter i; 7065 tree def; 7066 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF) 7067 set_value_range_to_varying (get_value_range (def)); 7068 prop_set_simulate_again (stmt, false); 7069 } 7070 else 7071 prop_set_simulate_again (stmt, true); 7072 } 7073 } 7074} 7075 7076/* Return the singleton value-range for NAME or NAME. */ 7077 7078static inline tree 7079vrp_valueize (tree name) 7080{ 7081 if (TREE_CODE (name) == SSA_NAME) 7082 { 7083 value_range_t *vr = get_value_range (name); 7084 if (vr->type == VR_RANGE 7085 && (vr->min == vr->max 7086 || operand_equal_p (vr->min, vr->max, 0))) 7087 return vr->min; 7088 } 7089 return name; 7090} 7091 7092/* Return the singleton value-range for NAME if that is a constant 7093 but signal to not follow SSA edges. */ 7094 7095static inline tree 7096vrp_valueize_1 (tree name) 7097{ 7098 if (TREE_CODE (name) == SSA_NAME) 7099 { 7100 /* If the definition may be simulated again we cannot follow 7101 this SSA edge as the SSA propagator does not necessarily 7102 re-visit the use. */ 7103 gimple def_stmt = SSA_NAME_DEF_STMT (name); 7104 if (!gimple_nop_p (def_stmt) 7105 && prop_simulate_again_p (def_stmt)) 7106 return NULL_TREE; 7107 value_range_t *vr = get_value_range (name); 7108 if (range_int_cst_singleton_p (vr)) 7109 return vr->min; 7110 } 7111 return name; 7112} 7113 7114/* Visit assignment STMT. If it produces an interesting range, record 7115 the SSA name in *OUTPUT_P. */ 7116 7117static enum ssa_prop_result 7118vrp_visit_assignment_or_call (gimple stmt, tree *output_p) 7119{ 7120 tree def, lhs; 7121 ssa_op_iter iter; 7122 enum gimple_code code = gimple_code (stmt); 7123 lhs = gimple_get_lhs (stmt); 7124 7125 /* We only keep track of ranges in integral and pointer types. */ 7126 if (TREE_CODE (lhs) == SSA_NAME 7127 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 7128 /* It is valid to have NULL MIN/MAX values on a type. See 7129 build_range_type. */ 7130 && TYPE_MIN_VALUE (TREE_TYPE (lhs)) 7131 && TYPE_MAX_VALUE (TREE_TYPE (lhs))) 7132 || POINTER_TYPE_P (TREE_TYPE (lhs)))) 7133 { 7134 value_range_t new_vr = VR_INITIALIZER; 7135 7136 /* Try folding the statement to a constant first. */ 7137 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize, 7138 vrp_valueize_1); 7139 if (tem && is_gimple_min_invariant (tem)) 7140 set_value_range_to_value (&new_vr, tem, NULL); 7141 /* Then dispatch to value-range extracting functions. */ 7142 else if (code == GIMPLE_CALL) 7143 extract_range_basic (&new_vr, stmt); 7144 else 7145 extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt)); 7146 7147 if (update_value_range (lhs, &new_vr)) 7148 { 7149 *output_p = lhs; 7150 7151 if (dump_file && (dump_flags & TDF_DETAILS)) 7152 { 7153 fprintf (dump_file, "Found new range for "); 7154 print_generic_expr (dump_file, lhs, 0); 7155 fprintf (dump_file, ": "); 7156 dump_value_range (dump_file, &new_vr); 7157 fprintf (dump_file, "\n"); 7158 } 7159 7160 if (new_vr.type == VR_VARYING) 7161 return SSA_PROP_VARYING; 7162 7163 return SSA_PROP_INTERESTING; 7164 } 7165 7166 return SSA_PROP_NOT_INTERESTING; 7167 } 7168 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)) 7169 switch (gimple_call_internal_fn (stmt)) 7170 { 7171 case IFN_ADD_OVERFLOW: 7172 case IFN_SUB_OVERFLOW: 7173 case IFN_MUL_OVERFLOW: 7174 /* These internal calls return _Complex integer type, 7175 which VRP does not track, but the immediate uses 7176 thereof might be interesting. */ 7177 if (lhs && TREE_CODE (lhs) == SSA_NAME) 7178 { 7179 imm_use_iterator iter; 7180 use_operand_p use_p; 7181 enum ssa_prop_result res = SSA_PROP_VARYING; 7182 7183 set_value_range_to_varying (get_value_range (lhs)); 7184 7185 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs) 7186 { 7187 gimple use_stmt = USE_STMT (use_p); 7188 if (!is_gimple_assign (use_stmt)) 7189 continue; 7190 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt); 7191 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR) 7192 continue; 7193 tree rhs1 = gimple_assign_rhs1 (use_stmt); 7194 tree use_lhs = gimple_assign_lhs (use_stmt); 7195 if (TREE_CODE (rhs1) != rhs_code 7196 || TREE_OPERAND (rhs1, 0) != lhs 7197 || TREE_CODE (use_lhs) != SSA_NAME 7198 || !stmt_interesting_for_vrp (use_stmt) 7199 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs)) 7200 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs)) 7201 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs)))) 7202 continue; 7203 7204 /* If there is a change in the value range for any of the 7205 REALPART_EXPR/IMAGPART_EXPR immediate uses, return 7206 SSA_PROP_INTERESTING. If there are any REALPART_EXPR 7207 or IMAGPART_EXPR immediate uses, but none of them have 7208 a change in their value ranges, return 7209 SSA_PROP_NOT_INTERESTING. If there are no 7210 {REAL,IMAG}PART_EXPR uses at all, 7211 return SSA_PROP_VARYING. */ 7212 value_range_t new_vr = VR_INITIALIZER; 7213 extract_range_basic (&new_vr, use_stmt); 7214 value_range_t *old_vr = get_value_range (use_lhs); 7215 if (old_vr->type != new_vr.type 7216 || !vrp_operand_equal_p (old_vr->min, new_vr.min) 7217 || !vrp_operand_equal_p (old_vr->max, new_vr.max) 7218 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv)) 7219 res = SSA_PROP_INTERESTING; 7220 else 7221 res = SSA_PROP_NOT_INTERESTING; 7222 BITMAP_FREE (new_vr.equiv); 7223 if (res == SSA_PROP_INTERESTING) 7224 { 7225 *output_p = lhs; 7226 return res; 7227 } 7228 } 7229 7230 return res; 7231 } 7232 break; 7233 default: 7234 break; 7235 } 7236 7237 /* Every other statement produces no useful ranges. */ 7238 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 7239 set_value_range_to_varying (get_value_range (def)); 7240 7241 return SSA_PROP_VARYING; 7242} 7243 7244/* Helper that gets the value range of the SSA_NAME with version I 7245 or a symbolic range containing the SSA_NAME only if the value range 7246 is varying or undefined. */ 7247 7248static inline value_range_t 7249get_vr_for_comparison (int i) 7250{ 7251 value_range_t vr = *get_value_range (ssa_name (i)); 7252 7253 /* If name N_i does not have a valid range, use N_i as its own 7254 range. This allows us to compare against names that may 7255 have N_i in their ranges. */ 7256 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) 7257 { 7258 vr.type = VR_RANGE; 7259 vr.min = ssa_name (i); 7260 vr.max = ssa_name (i); 7261 } 7262 7263 return vr; 7264} 7265 7266/* Compare all the value ranges for names equivalent to VAR with VAL 7267 using comparison code COMP. Return the same value returned by 7268 compare_range_with_value, including the setting of 7269 *STRICT_OVERFLOW_P. */ 7270 7271static tree 7272compare_name_with_value (enum tree_code comp, tree var, tree val, 7273 bool *strict_overflow_p) 7274{ 7275 bitmap_iterator bi; 7276 unsigned i; 7277 bitmap e; 7278 tree retval, t; 7279 int used_strict_overflow; 7280 bool sop; 7281 value_range_t equiv_vr; 7282 7283 /* Get the set of equivalences for VAR. */ 7284 e = get_value_range (var)->equiv; 7285 7286 /* Start at -1. Set it to 0 if we do a comparison without relying 7287 on overflow, or 1 if all comparisons rely on overflow. */ 7288 used_strict_overflow = -1; 7289 7290 /* Compare vars' value range with val. */ 7291 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var)); 7292 sop = false; 7293 retval = compare_range_with_value (comp, &equiv_vr, val, &sop); 7294 if (retval) 7295 used_strict_overflow = sop ? 1 : 0; 7296 7297 /* If the equiv set is empty we have done all work we need to do. */ 7298 if (e == NULL) 7299 { 7300 if (retval 7301 && used_strict_overflow > 0) 7302 *strict_overflow_p = true; 7303 return retval; 7304 } 7305 7306 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi) 7307 { 7308 equiv_vr = get_vr_for_comparison (i); 7309 sop = false; 7310 t = compare_range_with_value (comp, &equiv_vr, val, &sop); 7311 if (t) 7312 { 7313 /* If we get different answers from different members 7314 of the equivalence set this check must be in a dead 7315 code region. Folding it to a trap representation 7316 would be correct here. For now just return don't-know. */ 7317 if (retval != NULL 7318 && t != retval) 7319 { 7320 retval = NULL_TREE; 7321 break; 7322 } 7323 retval = t; 7324 7325 if (!sop) 7326 used_strict_overflow = 0; 7327 else if (used_strict_overflow < 0) 7328 used_strict_overflow = 1; 7329 } 7330 } 7331 7332 if (retval 7333 && used_strict_overflow > 0) 7334 *strict_overflow_p = true; 7335 7336 return retval; 7337} 7338 7339 7340/* Given a comparison code COMP and names N1 and N2, compare all the 7341 ranges equivalent to N1 against all the ranges equivalent to N2 7342 to determine the value of N1 COMP N2. Return the same value 7343 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate 7344 whether we relied on an overflow infinity in the comparison. */ 7345 7346 7347static tree 7348compare_names (enum tree_code comp, tree n1, tree n2, 7349 bool *strict_overflow_p) 7350{ 7351 tree t, retval; 7352 bitmap e1, e2; 7353 bitmap_iterator bi1, bi2; 7354 unsigned i1, i2; 7355 int used_strict_overflow; 7356 static bitmap_obstack *s_obstack = NULL; 7357 static bitmap s_e1 = NULL, s_e2 = NULL; 7358 7359 /* Compare the ranges of every name equivalent to N1 against the 7360 ranges of every name equivalent to N2. */ 7361 e1 = get_value_range (n1)->equiv; 7362 e2 = get_value_range (n2)->equiv; 7363 7364 /* Use the fake bitmaps if e1 or e2 are not available. */ 7365 if (s_obstack == NULL) 7366 { 7367 s_obstack = XNEW (bitmap_obstack); 7368 bitmap_obstack_initialize (s_obstack); 7369 s_e1 = BITMAP_ALLOC (s_obstack); 7370 s_e2 = BITMAP_ALLOC (s_obstack); 7371 } 7372 if (e1 == NULL) 7373 e1 = s_e1; 7374 if (e2 == NULL) 7375 e2 = s_e2; 7376 7377 /* Add N1 and N2 to their own set of equivalences to avoid 7378 duplicating the body of the loop just to check N1 and N2 7379 ranges. */ 7380 bitmap_set_bit (e1, SSA_NAME_VERSION (n1)); 7381 bitmap_set_bit (e2, SSA_NAME_VERSION (n2)); 7382 7383 /* If the equivalence sets have a common intersection, then the two 7384 names can be compared without checking their ranges. */ 7385 if (bitmap_intersect_p (e1, e2)) 7386 { 7387 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 7388 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 7389 7390 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR) 7391 ? boolean_true_node 7392 : boolean_false_node; 7393 } 7394 7395 /* Start at -1. Set it to 0 if we do a comparison without relying 7396 on overflow, or 1 if all comparisons rely on overflow. */ 7397 used_strict_overflow = -1; 7398 7399 /* Otherwise, compare all the equivalent ranges. First, add N1 and 7400 N2 to their own set of equivalences to avoid duplicating the body 7401 of the loop just to check N1 and N2 ranges. */ 7402 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1) 7403 { 7404 value_range_t vr1 = get_vr_for_comparison (i1); 7405 7406 t = retval = NULL_TREE; 7407 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2) 7408 { 7409 bool sop = false; 7410 7411 value_range_t vr2 = get_vr_for_comparison (i2); 7412 7413 t = compare_ranges (comp, &vr1, &vr2, &sop); 7414 if (t) 7415 { 7416 /* If we get different answers from different members 7417 of the equivalence set this check must be in a dead 7418 code region. Folding it to a trap representation 7419 would be correct here. For now just return don't-know. */ 7420 if (retval != NULL 7421 && t != retval) 7422 { 7423 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 7424 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 7425 return NULL_TREE; 7426 } 7427 retval = t; 7428 7429 if (!sop) 7430 used_strict_overflow = 0; 7431 else if (used_strict_overflow < 0) 7432 used_strict_overflow = 1; 7433 } 7434 } 7435 7436 if (retval) 7437 { 7438 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 7439 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 7440 if (used_strict_overflow > 0) 7441 *strict_overflow_p = true; 7442 return retval; 7443 } 7444 } 7445 7446 /* None of the equivalent ranges are useful in computing this 7447 comparison. */ 7448 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 7449 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 7450 return NULL_TREE; 7451} 7452 7453/* Helper function for vrp_evaluate_conditional_warnv. */ 7454 7455static tree 7456vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, 7457 tree op0, tree op1, 7458 bool * strict_overflow_p) 7459{ 7460 value_range_t *vr0, *vr1; 7461 7462 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; 7463 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; 7464 7465 tree res = NULL_TREE; 7466 if (vr0 && vr1) 7467 res = compare_ranges (code, vr0, vr1, strict_overflow_p); 7468 if (!res && vr0) 7469 res = compare_range_with_value (code, vr0, op1, strict_overflow_p); 7470 if (!res && vr1) 7471 res = (compare_range_with_value 7472 (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); 7473 return res; 7474} 7475 7476/* Helper function for vrp_evaluate_conditional_warnv. */ 7477 7478static tree 7479vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, 7480 tree op1, bool use_equiv_p, 7481 bool *strict_overflow_p, bool *only_ranges) 7482{ 7483 tree ret; 7484 if (only_ranges) 7485 *only_ranges = true; 7486 7487 /* We only deal with integral and pointer types. */ 7488 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) 7489 && !POINTER_TYPE_P (TREE_TYPE (op0))) 7490 return NULL_TREE; 7491 7492 if (use_equiv_p) 7493 { 7494 if (only_ranges 7495 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges 7496 (code, op0, op1, strict_overflow_p))) 7497 return ret; 7498 *only_ranges = false; 7499 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) 7500 return compare_names (code, op0, op1, strict_overflow_p); 7501 else if (TREE_CODE (op0) == SSA_NAME) 7502 return compare_name_with_value (code, op0, op1, strict_overflow_p); 7503 else if (TREE_CODE (op1) == SSA_NAME) 7504 return (compare_name_with_value 7505 (swap_tree_comparison (code), op1, op0, strict_overflow_p)); 7506 } 7507 else 7508 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, 7509 strict_overflow_p); 7510 return NULL_TREE; 7511} 7512 7513/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range 7514 information. Return NULL if the conditional can not be evaluated. 7515 The ranges of all the names equivalent with the operands in COND 7516 will be used when trying to compute the value. If the result is 7517 based on undefined signed overflow, issue a warning if 7518 appropriate. */ 7519 7520static tree 7521vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt) 7522{ 7523 bool sop; 7524 tree ret; 7525 bool only_ranges; 7526 7527 /* Some passes and foldings leak constants with overflow flag set 7528 into the IL. Avoid doing wrong things with these and bail out. */ 7529 if ((TREE_CODE (op0) == INTEGER_CST 7530 && TREE_OVERFLOW (op0)) 7531 || (TREE_CODE (op1) == INTEGER_CST 7532 && TREE_OVERFLOW (op1))) 7533 return NULL_TREE; 7534 7535 sop = false; 7536 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop, 7537 &only_ranges); 7538 7539 if (ret && sop) 7540 { 7541 enum warn_strict_overflow_code wc; 7542 const char* warnmsg; 7543 7544 if (is_gimple_min_invariant (ret)) 7545 { 7546 wc = WARN_STRICT_OVERFLOW_CONDITIONAL; 7547 warnmsg = G_("assuming signed overflow does not occur when " 7548 "simplifying conditional to constant"); 7549 } 7550 else 7551 { 7552 wc = WARN_STRICT_OVERFLOW_COMPARISON; 7553 warnmsg = G_("assuming signed overflow does not occur when " 7554 "simplifying conditional"); 7555 } 7556 7557 if (issue_strict_overflow_warning (wc)) 7558 { 7559 location_t location; 7560 7561 if (!gimple_has_location (stmt)) 7562 location = input_location; 7563 else 7564 location = gimple_location (stmt); 7565 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg); 7566 } 7567 } 7568 7569 if (warn_type_limits 7570 && ret && only_ranges 7571 && TREE_CODE_CLASS (code) == tcc_comparison 7572 && TREE_CODE (op0) == SSA_NAME) 7573 { 7574 /* If the comparison is being folded and the operand on the LHS 7575 is being compared against a constant value that is outside of 7576 the natural range of OP0's type, then the predicate will 7577 always fold regardless of the value of OP0. If -Wtype-limits 7578 was specified, emit a warning. */ 7579 tree type = TREE_TYPE (op0); 7580 value_range_t *vr0 = get_value_range (op0); 7581 7582 if (vr0->type == VR_RANGE 7583 && INTEGRAL_TYPE_P (type) 7584 && vrp_val_is_min (vr0->min) 7585 && vrp_val_is_max (vr0->max) 7586 && is_gimple_min_invariant (op1)) 7587 { 7588 location_t location; 7589 7590 if (!gimple_has_location (stmt)) 7591 location = input_location; 7592 else 7593 location = gimple_location (stmt); 7594 7595 warning_at (location, OPT_Wtype_limits, 7596 integer_zerop (ret) 7597 ? G_("comparison always false " 7598 "due to limited range of data type") 7599 : G_("comparison always true " 7600 "due to limited range of data type")); 7601 } 7602 } 7603 7604 return ret; 7605} 7606 7607 7608/* Visit conditional statement STMT. If we can determine which edge 7609 will be taken out of STMT's basic block, record it in 7610 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return 7611 SSA_PROP_VARYING. */ 7612 7613static enum ssa_prop_result 7614vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p) 7615{ 7616 tree val; 7617 bool sop; 7618 7619 *taken_edge_p = NULL; 7620 7621 if (dump_file && (dump_flags & TDF_DETAILS)) 7622 { 7623 tree use; 7624 ssa_op_iter i; 7625 7626 fprintf (dump_file, "\nVisiting conditional with predicate: "); 7627 print_gimple_stmt (dump_file, stmt, 0, 0); 7628 fprintf (dump_file, "\nWith known ranges\n"); 7629 7630 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) 7631 { 7632 fprintf (dump_file, "\t"); 7633 print_generic_expr (dump_file, use, 0); 7634 fprintf (dump_file, ": "); 7635 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]); 7636 } 7637 7638 fprintf (dump_file, "\n"); 7639 } 7640 7641 /* Compute the value of the predicate COND by checking the known 7642 ranges of each of its operands. 7643 7644 Note that we cannot evaluate all the equivalent ranges here 7645 because those ranges may not yet be final and with the current 7646 propagation strategy, we cannot determine when the value ranges 7647 of the names in the equivalence set have changed. 7648 7649 For instance, given the following code fragment 7650 7651 i_5 = PHI <8, i_13> 7652 ... 7653 i_14 = ASSERT_EXPR <i_5, i_5 != 0> 7654 if (i_14 == 1) 7655 ... 7656 7657 Assume that on the first visit to i_14, i_5 has the temporary 7658 range [8, 8] because the second argument to the PHI function is 7659 not yet executable. We derive the range ~[0, 0] for i_14 and the 7660 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for 7661 the first time, since i_14 is equivalent to the range [8, 8], we 7662 determine that the predicate is always false. 7663 7664 On the next round of propagation, i_13 is determined to be 7665 VARYING, which causes i_5 to drop down to VARYING. So, another 7666 visit to i_14 is scheduled. In this second visit, we compute the 7667 exact same range and equivalence set for i_14, namely ~[0, 0] and 7668 { i_5 }. But we did not have the previous range for i_5 7669 registered, so vrp_visit_assignment thinks that the range for 7670 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)' 7671 is not visited again, which stops propagation from visiting 7672 statements in the THEN clause of that if(). 7673 7674 To properly fix this we would need to keep the previous range 7675 value for the names in the equivalence set. This way we would've 7676 discovered that from one visit to the other i_5 changed from 7677 range [8, 8] to VR_VARYING. 7678 7679 However, fixing this apparent limitation may not be worth the 7680 additional checking. Testing on several code bases (GCC, DLV, 7681 MICO, TRAMP3D and SPEC2000) showed that doing this results in 7682 4 more predicates folded in SPEC. */ 7683 sop = false; 7684 7685 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt), 7686 gimple_cond_lhs (stmt), 7687 gimple_cond_rhs (stmt), 7688 false, &sop, NULL); 7689 if (val) 7690 { 7691 if (!sop) 7692 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val); 7693 else 7694 { 7695 if (dump_file && (dump_flags & TDF_DETAILS)) 7696 fprintf (dump_file, 7697 "\nIgnoring predicate evaluation because " 7698 "it assumes that signed overflow is undefined"); 7699 val = NULL_TREE; 7700 } 7701 } 7702 7703 if (dump_file && (dump_flags & TDF_DETAILS)) 7704 { 7705 fprintf (dump_file, "\nPredicate evaluates to: "); 7706 if (val == NULL_TREE) 7707 fprintf (dump_file, "DON'T KNOW\n"); 7708 else 7709 print_generic_stmt (dump_file, val, 0); 7710 } 7711 7712 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING; 7713} 7714 7715/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL 7716 that includes the value VAL. The search is restricted to the range 7717 [START_IDX, n - 1] where n is the size of VEC. 7718 7719 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is 7720 returned. 7721 7722 If there is no CASE_LABEL for VAL and there is one that is larger than VAL, 7723 it is placed in IDX and false is returned. 7724 7725 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is 7726 returned. */ 7727 7728static bool 7729find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx) 7730{ 7731 size_t n = gimple_switch_num_labels (stmt); 7732 size_t low, high; 7733 7734 /* Find case label for minimum of the value range or the next one. 7735 At each iteration we are searching in [low, high - 1]. */ 7736 7737 for (low = start_idx, high = n; high != low; ) 7738 { 7739 tree t; 7740 int cmp; 7741 /* Note that i != high, so we never ask for n. */ 7742 size_t i = (high + low) / 2; 7743 t = gimple_switch_label (stmt, i); 7744 7745 /* Cache the result of comparing CASE_LOW and val. */ 7746 cmp = tree_int_cst_compare (CASE_LOW (t), val); 7747 7748 if (cmp == 0) 7749 { 7750 /* Ranges cannot be empty. */ 7751 *idx = i; 7752 return true; 7753 } 7754 else if (cmp > 0) 7755 high = i; 7756 else 7757 { 7758 low = i + 1; 7759 if (CASE_HIGH (t) != NULL 7760 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) 7761 { 7762 *idx = i; 7763 return true; 7764 } 7765 } 7766 } 7767 7768 *idx = high; 7769 return false; 7770} 7771 7772/* Searches the case label vector VEC for the range of CASE_LABELs that is used 7773 for values between MIN and MAX. The first index is placed in MIN_IDX. The 7774 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty 7775 then MAX_IDX < MIN_IDX. 7776 Returns true if the default label is not needed. */ 7777 7778static bool 7779find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx, 7780 size_t *max_idx) 7781{ 7782 size_t i, j; 7783 bool min_take_default = !find_case_label_index (stmt, 1, min, &i); 7784 bool max_take_default = !find_case_label_index (stmt, i, max, &j); 7785 7786 if (i == j 7787 && min_take_default 7788 && max_take_default) 7789 { 7790 /* Only the default case label reached. 7791 Return an empty range. */ 7792 *min_idx = 1; 7793 *max_idx = 0; 7794 return false; 7795 } 7796 else 7797 { 7798 bool take_default = min_take_default || max_take_default; 7799 tree low, high; 7800 size_t k; 7801 7802 if (max_take_default) 7803 j--; 7804 7805 /* If the case label range is continuous, we do not need 7806 the default case label. Verify that. */ 7807 high = CASE_LOW (gimple_switch_label (stmt, i)); 7808 if (CASE_HIGH (gimple_switch_label (stmt, i))) 7809 high = CASE_HIGH (gimple_switch_label (stmt, i)); 7810 for (k = i + 1; k <= j; ++k) 7811 { 7812 low = CASE_LOW (gimple_switch_label (stmt, k)); 7813 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high))) 7814 { 7815 take_default = true; 7816 break; 7817 } 7818 high = low; 7819 if (CASE_HIGH (gimple_switch_label (stmt, k))) 7820 high = CASE_HIGH (gimple_switch_label (stmt, k)); 7821 } 7822 7823 *min_idx = i; 7824 *max_idx = j; 7825 return !take_default; 7826 } 7827} 7828 7829/* Searches the case label vector VEC for the ranges of CASE_LABELs that are 7830 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and 7831 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1. 7832 Returns true if the default label is not needed. */ 7833 7834static bool 7835find_case_label_ranges (gswitch *stmt, value_range_t *vr, size_t *min_idx1, 7836 size_t *max_idx1, size_t *min_idx2, 7837 size_t *max_idx2) 7838{ 7839 size_t i, j, k, l; 7840 unsigned int n = gimple_switch_num_labels (stmt); 7841 bool take_default; 7842 tree case_low, case_high; 7843 tree min = vr->min, max = vr->max; 7844 7845 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE); 7846 7847 take_default = !find_case_label_range (stmt, min, max, &i, &j); 7848 7849 /* Set second range to emtpy. */ 7850 *min_idx2 = 1; 7851 *max_idx2 = 0; 7852 7853 if (vr->type == VR_RANGE) 7854 { 7855 *min_idx1 = i; 7856 *max_idx1 = j; 7857 return !take_default; 7858 } 7859 7860 /* Set first range to all case labels. */ 7861 *min_idx1 = 1; 7862 *max_idx1 = n - 1; 7863 7864 if (i > j) 7865 return false; 7866 7867 /* Make sure all the values of case labels [i , j] are contained in 7868 range [MIN, MAX]. */ 7869 case_low = CASE_LOW (gimple_switch_label (stmt, i)); 7870 case_high = CASE_HIGH (gimple_switch_label (stmt, j)); 7871 if (tree_int_cst_compare (case_low, min) < 0) 7872 i += 1; 7873 if (case_high != NULL_TREE 7874 && tree_int_cst_compare (max, case_high) < 0) 7875 j -= 1; 7876 7877 if (i > j) 7878 return false; 7879 7880 /* If the range spans case labels [i, j], the corresponding anti-range spans 7881 the labels [1, i - 1] and [j + 1, n - 1]. */ 7882 k = j + 1; 7883 l = n - 1; 7884 if (k > l) 7885 { 7886 k = 1; 7887 l = 0; 7888 } 7889 7890 j = i - 1; 7891 i = 1; 7892 if (i > j) 7893 { 7894 i = k; 7895 j = l; 7896 k = 1; 7897 l = 0; 7898 } 7899 7900 *min_idx1 = i; 7901 *max_idx1 = j; 7902 *min_idx2 = k; 7903 *max_idx2 = l; 7904 return false; 7905} 7906 7907/* Visit switch statement STMT. If we can determine which edge 7908 will be taken out of STMT's basic block, record it in 7909 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return 7910 SSA_PROP_VARYING. */ 7911 7912static enum ssa_prop_result 7913vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p) 7914{ 7915 tree op, val; 7916 value_range_t *vr; 7917 size_t i = 0, j = 0, k, l; 7918 bool take_default; 7919 7920 *taken_edge_p = NULL; 7921 op = gimple_switch_index (stmt); 7922 if (TREE_CODE (op) != SSA_NAME) 7923 return SSA_PROP_VARYING; 7924 7925 vr = get_value_range (op); 7926 if (dump_file && (dump_flags & TDF_DETAILS)) 7927 { 7928 fprintf (dump_file, "\nVisiting switch expression with operand "); 7929 print_generic_expr (dump_file, op, 0); 7930 fprintf (dump_file, " with known range "); 7931 dump_value_range (dump_file, vr); 7932 fprintf (dump_file, "\n"); 7933 } 7934 7935 if ((vr->type != VR_RANGE 7936 && vr->type != VR_ANTI_RANGE) 7937 || symbolic_range_p (vr)) 7938 return SSA_PROP_VARYING; 7939 7940 /* Find the single edge that is taken from the switch expression. */ 7941 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l); 7942 7943 /* Check if the range spans no CASE_LABEL. If so, we only reach the default 7944 label */ 7945 if (j < i) 7946 { 7947 gcc_assert (take_default); 7948 val = gimple_switch_default_label (stmt); 7949 } 7950 else 7951 { 7952 /* Check if labels with index i to j and maybe the default label 7953 are all reaching the same label. */ 7954 7955 val = gimple_switch_label (stmt, i); 7956 if (take_default 7957 && CASE_LABEL (gimple_switch_default_label (stmt)) 7958 != CASE_LABEL (val)) 7959 { 7960 if (dump_file && (dump_flags & TDF_DETAILS)) 7961 fprintf (dump_file, " not a single destination for this " 7962 "range\n"); 7963 return SSA_PROP_VARYING; 7964 } 7965 for (++i; i <= j; ++i) 7966 { 7967 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val)) 7968 { 7969 if (dump_file && (dump_flags & TDF_DETAILS)) 7970 fprintf (dump_file, " not a single destination for this " 7971 "range\n"); 7972 return SSA_PROP_VARYING; 7973 } 7974 } 7975 for (; k <= l; ++k) 7976 { 7977 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val)) 7978 { 7979 if (dump_file && (dump_flags & TDF_DETAILS)) 7980 fprintf (dump_file, " not a single destination for this " 7981 "range\n"); 7982 return SSA_PROP_VARYING; 7983 } 7984 } 7985 } 7986 7987 *taken_edge_p = find_edge (gimple_bb (stmt), 7988 label_to_block (CASE_LABEL (val))); 7989 7990 if (dump_file && (dump_flags & TDF_DETAILS)) 7991 { 7992 fprintf (dump_file, " will take edge to "); 7993 print_generic_stmt (dump_file, CASE_LABEL (val), 0); 7994 } 7995 7996 return SSA_PROP_INTERESTING; 7997} 7998 7999 8000/* Evaluate statement STMT. If the statement produces a useful range, 8001 return SSA_PROP_INTERESTING and record the SSA name with the 8002 interesting range into *OUTPUT_P. 8003 8004 If STMT is a conditional branch and we can determine its truth 8005 value, the taken edge is recorded in *TAKEN_EDGE_P. 8006 8007 If STMT produces a varying value, return SSA_PROP_VARYING. */ 8008 8009static enum ssa_prop_result 8010vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) 8011{ 8012 tree def; 8013 ssa_op_iter iter; 8014 8015 if (dump_file && (dump_flags & TDF_DETAILS)) 8016 { 8017 fprintf (dump_file, "\nVisiting statement:\n"); 8018 print_gimple_stmt (dump_file, stmt, 0, dump_flags); 8019 } 8020 8021 if (!stmt_interesting_for_vrp (stmt)) 8022 gcc_assert (stmt_ends_bb_p (stmt)); 8023 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 8024 return vrp_visit_assignment_or_call (stmt, output_p); 8025 else if (gimple_code (stmt) == GIMPLE_COND) 8026 return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p); 8027 else if (gimple_code (stmt) == GIMPLE_SWITCH) 8028 return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p); 8029 8030 /* All other statements produce nothing of interest for VRP, so mark 8031 their outputs varying and prevent further simulation. */ 8032 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 8033 set_value_range_to_varying (get_value_range (def)); 8034 8035 return SSA_PROP_VARYING; 8036} 8037 8038/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and 8039 { VR1TYPE, VR0MIN, VR0MAX } and store the result 8040 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest 8041 possible such range. The resulting range is not canonicalized. */ 8042 8043static void 8044union_ranges (enum value_range_type *vr0type, 8045 tree *vr0min, tree *vr0max, 8046 enum value_range_type vr1type, 8047 tree vr1min, tree vr1max) 8048{ 8049 bool mineq = operand_equal_p (*vr0min, vr1min, 0); 8050 bool maxeq = operand_equal_p (*vr0max, vr1max, 0); 8051 8052 /* [] is vr0, () is vr1 in the following classification comments. */ 8053 if (mineq && maxeq) 8054 { 8055 /* [( )] */ 8056 if (*vr0type == vr1type) 8057 /* Nothing to do for equal ranges. */ 8058 ; 8059 else if ((*vr0type == VR_RANGE 8060 && vr1type == VR_ANTI_RANGE) 8061 || (*vr0type == VR_ANTI_RANGE 8062 && vr1type == VR_RANGE)) 8063 { 8064 /* For anti-range with range union the result is varying. */ 8065 goto give_up; 8066 } 8067 else 8068 gcc_unreachable (); 8069 } 8070 else if (operand_less_p (*vr0max, vr1min) == 1 8071 || operand_less_p (vr1max, *vr0min) == 1) 8072 { 8073 /* [ ] ( ) or ( ) [ ] 8074 If the ranges have an empty intersection, result of the union 8075 operation is the anti-range or if both are anti-ranges 8076 it covers all. */ 8077 if (*vr0type == VR_ANTI_RANGE 8078 && vr1type == VR_ANTI_RANGE) 8079 goto give_up; 8080 else if (*vr0type == VR_ANTI_RANGE 8081 && vr1type == VR_RANGE) 8082 ; 8083 else if (*vr0type == VR_RANGE 8084 && vr1type == VR_ANTI_RANGE) 8085 { 8086 *vr0type = vr1type; 8087 *vr0min = vr1min; 8088 *vr0max = vr1max; 8089 } 8090 else if (*vr0type == VR_RANGE 8091 && vr1type == VR_RANGE) 8092 { 8093 /* The result is the convex hull of both ranges. */ 8094 if (operand_less_p (*vr0max, vr1min) == 1) 8095 { 8096 /* If the result can be an anti-range, create one. */ 8097 if (TREE_CODE (*vr0max) == INTEGER_CST 8098 && TREE_CODE (vr1min) == INTEGER_CST 8099 && vrp_val_is_min (*vr0min) 8100 && vrp_val_is_max (vr1max)) 8101 { 8102 tree min = int_const_binop (PLUS_EXPR, 8103 *vr0max, 8104 build_int_cst (TREE_TYPE (*vr0max), 1)); 8105 tree max = int_const_binop (MINUS_EXPR, 8106 vr1min, 8107 build_int_cst (TREE_TYPE (vr1min), 1)); 8108 if (!operand_less_p (max, min)) 8109 { 8110 *vr0type = VR_ANTI_RANGE; 8111 *vr0min = min; 8112 *vr0max = max; 8113 } 8114 else 8115 *vr0max = vr1max; 8116 } 8117 else 8118 *vr0max = vr1max; 8119 } 8120 else 8121 { 8122 /* If the result can be an anti-range, create one. */ 8123 if (TREE_CODE (vr1max) == INTEGER_CST 8124 && TREE_CODE (*vr0min) == INTEGER_CST 8125 && vrp_val_is_min (vr1min) 8126 && vrp_val_is_max (*vr0max)) 8127 { 8128 tree min = int_const_binop (PLUS_EXPR, 8129 vr1max, 8130 build_int_cst (TREE_TYPE (vr1max), 1)); 8131 tree max = int_const_binop (MINUS_EXPR, 8132 *vr0min, 8133 build_int_cst (TREE_TYPE (*vr0min), 1)); 8134 if (!operand_less_p (max, min)) 8135 { 8136 *vr0type = VR_ANTI_RANGE; 8137 *vr0min = min; 8138 *vr0max = max; 8139 } 8140 else 8141 *vr0min = vr1min; 8142 } 8143 else 8144 *vr0min = vr1min; 8145 } 8146 } 8147 else 8148 gcc_unreachable (); 8149 } 8150 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1) 8151 && (mineq || operand_less_p (*vr0min, vr1min) == 1)) 8152 { 8153 /* [ ( ) ] or [( ) ] or [ ( )] */ 8154 if (*vr0type == VR_RANGE 8155 && vr1type == VR_RANGE) 8156 ; 8157 else if (*vr0type == VR_ANTI_RANGE 8158 && vr1type == VR_ANTI_RANGE) 8159 { 8160 *vr0type = vr1type; 8161 *vr0min = vr1min; 8162 *vr0max = vr1max; 8163 } 8164 else if (*vr0type == VR_ANTI_RANGE 8165 && vr1type == VR_RANGE) 8166 { 8167 /* Arbitrarily choose the right or left gap. */ 8168 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) 8169 *vr0max = int_const_binop (MINUS_EXPR, vr1min, 8170 build_int_cst (TREE_TYPE (vr1min), 1)); 8171 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) 8172 *vr0min = int_const_binop (PLUS_EXPR, vr1max, 8173 build_int_cst (TREE_TYPE (vr1max), 1)); 8174 else 8175 goto give_up; 8176 } 8177 else if (*vr0type == VR_RANGE 8178 && vr1type == VR_ANTI_RANGE) 8179 /* The result covers everything. */ 8180 goto give_up; 8181 else 8182 gcc_unreachable (); 8183 } 8184 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1) 8185 && (mineq || operand_less_p (vr1min, *vr0min) == 1)) 8186 { 8187 /* ( [ ] ) or ([ ] ) or ( [ ]) */ 8188 if (*vr0type == VR_RANGE 8189 && vr1type == VR_RANGE) 8190 { 8191 *vr0type = vr1type; 8192 *vr0min = vr1min; 8193 *vr0max = vr1max; 8194 } 8195 else if (*vr0type == VR_ANTI_RANGE 8196 && vr1type == VR_ANTI_RANGE) 8197 ; 8198 else if (*vr0type == VR_RANGE 8199 && vr1type == VR_ANTI_RANGE) 8200 { 8201 *vr0type = VR_ANTI_RANGE; 8202 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) 8203 { 8204 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, 8205 build_int_cst (TREE_TYPE (*vr0min), 1)); 8206 *vr0min = vr1min; 8207 } 8208 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) 8209 { 8210 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, 8211 build_int_cst (TREE_TYPE (*vr0max), 1)); 8212 *vr0max = vr1max; 8213 } 8214 else 8215 goto give_up; 8216 } 8217 else if (*vr0type == VR_ANTI_RANGE 8218 && vr1type == VR_RANGE) 8219 /* The result covers everything. */ 8220 goto give_up; 8221 else 8222 gcc_unreachable (); 8223 } 8224 else if ((operand_less_p (vr1min, *vr0max) == 1 8225 || operand_equal_p (vr1min, *vr0max, 0)) 8226 && operand_less_p (*vr0min, vr1min) == 1 8227 && operand_less_p (*vr0max, vr1max) == 1) 8228 { 8229 /* [ ( ] ) or [ ]( ) */ 8230 if (*vr0type == VR_RANGE 8231 && vr1type == VR_RANGE) 8232 *vr0max = vr1max; 8233 else if (*vr0type == VR_ANTI_RANGE 8234 && vr1type == VR_ANTI_RANGE) 8235 *vr0min = vr1min; 8236 else if (*vr0type == VR_ANTI_RANGE 8237 && vr1type == VR_RANGE) 8238 { 8239 if (TREE_CODE (vr1min) == INTEGER_CST) 8240 *vr0max = int_const_binop (MINUS_EXPR, vr1min, 8241 build_int_cst (TREE_TYPE (vr1min), 1)); 8242 else 8243 goto give_up; 8244 } 8245 else if (*vr0type == VR_RANGE 8246 && vr1type == VR_ANTI_RANGE) 8247 { 8248 if (TREE_CODE (*vr0max) == INTEGER_CST) 8249 { 8250 *vr0type = vr1type; 8251 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, 8252 build_int_cst (TREE_TYPE (*vr0max), 1)); 8253 *vr0max = vr1max; 8254 } 8255 else 8256 goto give_up; 8257 } 8258 else 8259 gcc_unreachable (); 8260 } 8261 else if ((operand_less_p (*vr0min, vr1max) == 1 8262 || operand_equal_p (*vr0min, vr1max, 0)) 8263 && operand_less_p (vr1min, *vr0min) == 1 8264 && operand_less_p (vr1max, *vr0max) == 1) 8265 { 8266 /* ( [ ) ] or ( )[ ] */ 8267 if (*vr0type == VR_RANGE 8268 && vr1type == VR_RANGE) 8269 *vr0min = vr1min; 8270 else if (*vr0type == VR_ANTI_RANGE 8271 && vr1type == VR_ANTI_RANGE) 8272 *vr0max = vr1max; 8273 else if (*vr0type == VR_ANTI_RANGE 8274 && vr1type == VR_RANGE) 8275 { 8276 if (TREE_CODE (vr1max) == INTEGER_CST) 8277 *vr0min = int_const_binop (PLUS_EXPR, vr1max, 8278 build_int_cst (TREE_TYPE (vr1max), 1)); 8279 else 8280 goto give_up; 8281 } 8282 else if (*vr0type == VR_RANGE 8283 && vr1type == VR_ANTI_RANGE) 8284 { 8285 if (TREE_CODE (*vr0min) == INTEGER_CST) 8286 { 8287 *vr0type = vr1type; 8288 *vr0min = vr1min; 8289 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, 8290 build_int_cst (TREE_TYPE (*vr0min), 1)); 8291 } 8292 else 8293 goto give_up; 8294 } 8295 else 8296 gcc_unreachable (); 8297 } 8298 else 8299 goto give_up; 8300 8301 return; 8302 8303give_up: 8304 *vr0type = VR_VARYING; 8305 *vr0min = NULL_TREE; 8306 *vr0max = NULL_TREE; 8307} 8308 8309/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and 8310 { VR1TYPE, VR0MIN, VR0MAX } and store the result 8311 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest 8312 possible such range. The resulting range is not canonicalized. */ 8313 8314static void 8315intersect_ranges (enum value_range_type *vr0type, 8316 tree *vr0min, tree *vr0max, 8317 enum value_range_type vr1type, 8318 tree vr1min, tree vr1max) 8319{ 8320 bool mineq = operand_equal_p (*vr0min, vr1min, 0); 8321 bool maxeq = operand_equal_p (*vr0max, vr1max, 0); 8322 8323 /* [] is vr0, () is vr1 in the following classification comments. */ 8324 if (mineq && maxeq) 8325 { 8326 /* [( )] */ 8327 if (*vr0type == vr1type) 8328 /* Nothing to do for equal ranges. */ 8329 ; 8330 else if ((*vr0type == VR_RANGE 8331 && vr1type == VR_ANTI_RANGE) 8332 || (*vr0type == VR_ANTI_RANGE 8333 && vr1type == VR_RANGE)) 8334 { 8335 /* For anti-range with range intersection the result is empty. */ 8336 *vr0type = VR_UNDEFINED; 8337 *vr0min = NULL_TREE; 8338 *vr0max = NULL_TREE; 8339 } 8340 else 8341 gcc_unreachable (); 8342 } 8343 else if (operand_less_p (*vr0max, vr1min) == 1 8344 || operand_less_p (vr1max, *vr0min) == 1) 8345 { 8346 /* [ ] ( ) or ( ) [ ] 8347 If the ranges have an empty intersection, the result of the 8348 intersect operation is the range for intersecting an 8349 anti-range with a range or empty when intersecting two ranges. */ 8350 if (*vr0type == VR_RANGE 8351 && vr1type == VR_ANTI_RANGE) 8352 ; 8353 else if (*vr0type == VR_ANTI_RANGE 8354 && vr1type == VR_RANGE) 8355 { 8356 *vr0type = vr1type; 8357 *vr0min = vr1min; 8358 *vr0max = vr1max; 8359 } 8360 else if (*vr0type == VR_RANGE 8361 && vr1type == VR_RANGE) 8362 { 8363 *vr0type = VR_UNDEFINED; 8364 *vr0min = NULL_TREE; 8365 *vr0max = NULL_TREE; 8366 } 8367 else if (*vr0type == VR_ANTI_RANGE 8368 && vr1type == VR_ANTI_RANGE) 8369 { 8370 /* If the anti-ranges are adjacent to each other merge them. */ 8371 if (TREE_CODE (*vr0max) == INTEGER_CST 8372 && TREE_CODE (vr1min) == INTEGER_CST 8373 && operand_less_p (*vr0max, vr1min) == 1 8374 && integer_onep (int_const_binop (MINUS_EXPR, 8375 vr1min, *vr0max))) 8376 *vr0max = vr1max; 8377 else if (TREE_CODE (vr1max) == INTEGER_CST 8378 && TREE_CODE (*vr0min) == INTEGER_CST 8379 && operand_less_p (vr1max, *vr0min) == 1 8380 && integer_onep (int_const_binop (MINUS_EXPR, 8381 *vr0min, vr1max))) 8382 *vr0min = vr1min; 8383 /* Else arbitrarily take VR0. */ 8384 } 8385 } 8386 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1) 8387 && (mineq || operand_less_p (*vr0min, vr1min) == 1)) 8388 { 8389 /* [ ( ) ] or [( ) ] or [ ( )] */ 8390 if (*vr0type == VR_RANGE 8391 && vr1type == VR_RANGE) 8392 { 8393 /* If both are ranges the result is the inner one. */ 8394 *vr0type = vr1type; 8395 *vr0min = vr1min; 8396 *vr0max = vr1max; 8397 } 8398 else if (*vr0type == VR_RANGE 8399 && vr1type == VR_ANTI_RANGE) 8400 { 8401 /* Choose the right gap if the left one is empty. */ 8402 if (mineq) 8403 { 8404 if (TREE_CODE (vr1max) == INTEGER_CST) 8405 *vr0min = int_const_binop (PLUS_EXPR, vr1max, 8406 build_int_cst (TREE_TYPE (vr1max), 1)); 8407 else 8408 *vr0min = vr1max; 8409 } 8410 /* Choose the left gap if the right one is empty. */ 8411 else if (maxeq) 8412 { 8413 if (TREE_CODE (vr1min) == INTEGER_CST) 8414 *vr0max = int_const_binop (MINUS_EXPR, vr1min, 8415 build_int_cst (TREE_TYPE (vr1min), 1)); 8416 else 8417 *vr0max = vr1min; 8418 } 8419 /* Choose the anti-range if the range is effectively varying. */ 8420 else if (vrp_val_is_min (*vr0min) 8421 && vrp_val_is_max (*vr0max)) 8422 { 8423 *vr0type = vr1type; 8424 *vr0min = vr1min; 8425 *vr0max = vr1max; 8426 } 8427 /* Else choose the range. */ 8428 } 8429 else if (*vr0type == VR_ANTI_RANGE 8430 && vr1type == VR_ANTI_RANGE) 8431 /* If both are anti-ranges the result is the outer one. */ 8432 ; 8433 else if (*vr0type == VR_ANTI_RANGE 8434 && vr1type == VR_RANGE) 8435 { 8436 /* The intersection is empty. */ 8437 *vr0type = VR_UNDEFINED; 8438 *vr0min = NULL_TREE; 8439 *vr0max = NULL_TREE; 8440 } 8441 else 8442 gcc_unreachable (); 8443 } 8444 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1) 8445 && (mineq || operand_less_p (vr1min, *vr0min) == 1)) 8446 { 8447 /* ( [ ] ) or ([ ] ) or ( [ ]) */ 8448 if (*vr0type == VR_RANGE 8449 && vr1type == VR_RANGE) 8450 /* Choose the inner range. */ 8451 ; 8452 else if (*vr0type == VR_ANTI_RANGE 8453 && vr1type == VR_RANGE) 8454 { 8455 /* Choose the right gap if the left is empty. */ 8456 if (mineq) 8457 { 8458 *vr0type = VR_RANGE; 8459 if (TREE_CODE (*vr0max) == INTEGER_CST) 8460 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, 8461 build_int_cst (TREE_TYPE (*vr0max), 1)); 8462 else 8463 *vr0min = *vr0max; 8464 *vr0max = vr1max; 8465 } 8466 /* Choose the left gap if the right is empty. */ 8467 else if (maxeq) 8468 { 8469 *vr0type = VR_RANGE; 8470 if (TREE_CODE (*vr0min) == INTEGER_CST) 8471 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, 8472 build_int_cst (TREE_TYPE (*vr0min), 1)); 8473 else 8474 *vr0max = *vr0min; 8475 *vr0min = vr1min; 8476 } 8477 /* Choose the anti-range if the range is effectively varying. */ 8478 else if (vrp_val_is_min (vr1min) 8479 && vrp_val_is_max (vr1max)) 8480 ; 8481 /* Else choose the range. */ 8482 else 8483 { 8484 *vr0type = vr1type; 8485 *vr0min = vr1min; 8486 *vr0max = vr1max; 8487 } 8488 } 8489 else if (*vr0type == VR_ANTI_RANGE 8490 && vr1type == VR_ANTI_RANGE) 8491 { 8492 /* If both are anti-ranges the result is the outer one. */ 8493 *vr0type = vr1type; 8494 *vr0min = vr1min; 8495 *vr0max = vr1max; 8496 } 8497 else if (vr1type == VR_ANTI_RANGE 8498 && *vr0type == VR_RANGE) 8499 { 8500 /* The intersection is empty. */ 8501 *vr0type = VR_UNDEFINED; 8502 *vr0min = NULL_TREE; 8503 *vr0max = NULL_TREE; 8504 } 8505 else 8506 gcc_unreachable (); 8507 } 8508 else if ((operand_less_p (vr1min, *vr0max) == 1 8509 || operand_equal_p (vr1min, *vr0max, 0)) 8510 && operand_less_p (*vr0min, vr1min) == 1) 8511 { 8512 /* [ ( ] ) or [ ]( ) */ 8513 if (*vr0type == VR_ANTI_RANGE 8514 && vr1type == VR_ANTI_RANGE) 8515 *vr0max = vr1max; 8516 else if (*vr0type == VR_RANGE 8517 && vr1type == VR_RANGE) 8518 *vr0min = vr1min; 8519 else if (*vr0type == VR_RANGE 8520 && vr1type == VR_ANTI_RANGE) 8521 { 8522 if (TREE_CODE (vr1min) == INTEGER_CST) 8523 *vr0max = int_const_binop (MINUS_EXPR, vr1min, 8524 build_int_cst (TREE_TYPE (vr1min), 1)); 8525 else 8526 *vr0max = vr1min; 8527 } 8528 else if (*vr0type == VR_ANTI_RANGE 8529 && vr1type == VR_RANGE) 8530 { 8531 *vr0type = VR_RANGE; 8532 if (TREE_CODE (*vr0max) == INTEGER_CST) 8533 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, 8534 build_int_cst (TREE_TYPE (*vr0max), 1)); 8535 else 8536 *vr0min = *vr0max; 8537 *vr0max = vr1max; 8538 } 8539 else 8540 gcc_unreachable (); 8541 } 8542 else if ((operand_less_p (*vr0min, vr1max) == 1 8543 || operand_equal_p (*vr0min, vr1max, 0)) 8544 && operand_less_p (vr1min, *vr0min) == 1) 8545 { 8546 /* ( [ ) ] or ( )[ ] */ 8547 if (*vr0type == VR_ANTI_RANGE 8548 && vr1type == VR_ANTI_RANGE) 8549 *vr0min = vr1min; 8550 else if (*vr0type == VR_RANGE 8551 && vr1type == VR_RANGE) 8552 *vr0max = vr1max; 8553 else if (*vr0type == VR_RANGE 8554 && vr1type == VR_ANTI_RANGE) 8555 { 8556 if (TREE_CODE (vr1max) == INTEGER_CST) 8557 *vr0min = int_const_binop (PLUS_EXPR, vr1max, 8558 build_int_cst (TREE_TYPE (vr1max), 1)); 8559 else 8560 *vr0min = vr1max; 8561 } 8562 else if (*vr0type == VR_ANTI_RANGE 8563 && vr1type == VR_RANGE) 8564 { 8565 *vr0type = VR_RANGE; 8566 if (TREE_CODE (*vr0min) == INTEGER_CST) 8567 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, 8568 build_int_cst (TREE_TYPE (*vr0min), 1)); 8569 else 8570 *vr0max = *vr0min; 8571 *vr0min = vr1min; 8572 } 8573 else 8574 gcc_unreachable (); 8575 } 8576 8577 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as 8578 result for the intersection. That's always a conservative 8579 correct estimate. */ 8580 8581 return; 8582} 8583 8584 8585/* Intersect the two value-ranges *VR0 and *VR1 and store the result 8586 in *VR0. This may not be the smallest possible such range. */ 8587 8588static void 8589vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1) 8590{ 8591 value_range_t saved; 8592 8593 /* If either range is VR_VARYING the other one wins. */ 8594 if (vr1->type == VR_VARYING) 8595 return; 8596 if (vr0->type == VR_VARYING) 8597 { 8598 copy_value_range (vr0, vr1); 8599 return; 8600 } 8601 8602 /* When either range is VR_UNDEFINED the resulting range is 8603 VR_UNDEFINED, too. */ 8604 if (vr0->type == VR_UNDEFINED) 8605 return; 8606 if (vr1->type == VR_UNDEFINED) 8607 { 8608 set_value_range_to_undefined (vr0); 8609 return; 8610 } 8611 8612 /* Save the original vr0 so we can return it as conservative intersection 8613 result when our worker turns things to varying. */ 8614 saved = *vr0; 8615 intersect_ranges (&vr0->type, &vr0->min, &vr0->max, 8616 vr1->type, vr1->min, vr1->max); 8617 /* Make sure to canonicalize the result though as the inversion of a 8618 VR_RANGE can still be a VR_RANGE. */ 8619 set_and_canonicalize_value_range (vr0, vr0->type, 8620 vr0->min, vr0->max, vr0->equiv); 8621 /* If that failed, use the saved original VR0. */ 8622 if (vr0->type == VR_VARYING) 8623 { 8624 *vr0 = saved; 8625 return; 8626 } 8627 /* If the result is VR_UNDEFINED there is no need to mess with 8628 the equivalencies. */ 8629 if (vr0->type == VR_UNDEFINED) 8630 return; 8631 8632 /* The resulting set of equivalences for range intersection is the union of 8633 the two sets. */ 8634 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 8635 bitmap_ior_into (vr0->equiv, vr1->equiv); 8636 else if (vr1->equiv && !vr0->equiv) 8637 bitmap_copy (vr0->equiv, vr1->equiv); 8638} 8639 8640static void 8641vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1) 8642{ 8643 if (dump_file && (dump_flags & TDF_DETAILS)) 8644 { 8645 fprintf (dump_file, "Intersecting\n "); 8646 dump_value_range (dump_file, vr0); 8647 fprintf (dump_file, "\nand\n "); 8648 dump_value_range (dump_file, vr1); 8649 fprintf (dump_file, "\n"); 8650 } 8651 vrp_intersect_ranges_1 (vr0, vr1); 8652 if (dump_file && (dump_flags & TDF_DETAILS)) 8653 { 8654 fprintf (dump_file, "to\n "); 8655 dump_value_range (dump_file, vr0); 8656 fprintf (dump_file, "\n"); 8657 } 8658} 8659 8660/* Meet operation for value ranges. Given two value ranges VR0 and 8661 VR1, store in VR0 a range that contains both VR0 and VR1. This 8662 may not be the smallest possible such range. */ 8663 8664static void 8665vrp_meet_1 (value_range_t *vr0, value_range_t *vr1) 8666{ 8667 value_range_t saved; 8668 8669 if (vr0->type == VR_UNDEFINED) 8670 { 8671 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv); 8672 return; 8673 } 8674 8675 if (vr1->type == VR_UNDEFINED) 8676 { 8677 /* VR0 already has the resulting range. */ 8678 return; 8679 } 8680 8681 if (vr0->type == VR_VARYING) 8682 { 8683 /* Nothing to do. VR0 already has the resulting range. */ 8684 return; 8685 } 8686 8687 if (vr1->type == VR_VARYING) 8688 { 8689 set_value_range_to_varying (vr0); 8690 return; 8691 } 8692 8693 saved = *vr0; 8694 union_ranges (&vr0->type, &vr0->min, &vr0->max, 8695 vr1->type, vr1->min, vr1->max); 8696 if (vr0->type == VR_VARYING) 8697 { 8698 /* Failed to find an efficient meet. Before giving up and setting 8699 the result to VARYING, see if we can at least derive a useful 8700 anti-range. FIXME, all this nonsense about distinguishing 8701 anti-ranges from ranges is necessary because of the odd 8702 semantics of range_includes_zero_p and friends. */ 8703 if (((saved.type == VR_RANGE 8704 && range_includes_zero_p (saved.min, saved.max) == 0) 8705 || (saved.type == VR_ANTI_RANGE 8706 && range_includes_zero_p (saved.min, saved.max) == 1)) 8707 && ((vr1->type == VR_RANGE 8708 && range_includes_zero_p (vr1->min, vr1->max) == 0) 8709 || (vr1->type == VR_ANTI_RANGE 8710 && range_includes_zero_p (vr1->min, vr1->max) == 1))) 8711 { 8712 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min)); 8713 8714 /* Since this meet operation did not result from the meeting of 8715 two equivalent names, VR0 cannot have any equivalences. */ 8716 if (vr0->equiv) 8717 bitmap_clear (vr0->equiv); 8718 return; 8719 } 8720 8721 set_value_range_to_varying (vr0); 8722 return; 8723 } 8724 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max, 8725 vr0->equiv); 8726 if (vr0->type == VR_VARYING) 8727 return; 8728 8729 /* The resulting set of equivalences is always the intersection of 8730 the two sets. */ 8731 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 8732 bitmap_and_into (vr0->equiv, vr1->equiv); 8733 else if (vr0->equiv && !vr1->equiv) 8734 bitmap_clear (vr0->equiv); 8735} 8736 8737static void 8738vrp_meet (value_range_t *vr0, value_range_t *vr1) 8739{ 8740 if (dump_file && (dump_flags & TDF_DETAILS)) 8741 { 8742 fprintf (dump_file, "Meeting\n "); 8743 dump_value_range (dump_file, vr0); 8744 fprintf (dump_file, "\nand\n "); 8745 dump_value_range (dump_file, vr1); 8746 fprintf (dump_file, "\n"); 8747 } 8748 vrp_meet_1 (vr0, vr1); 8749 if (dump_file && (dump_flags & TDF_DETAILS)) 8750 { 8751 fprintf (dump_file, "to\n "); 8752 dump_value_range (dump_file, vr0); 8753 fprintf (dump_file, "\n"); 8754 } 8755} 8756 8757 8758/* Visit all arguments for PHI node PHI that flow through executable 8759 edges. If a valid value range can be derived from all the incoming 8760 value ranges, set a new range for the LHS of PHI. */ 8761 8762static enum ssa_prop_result 8763vrp_visit_phi_node (gphi *phi) 8764{ 8765 size_t i; 8766 tree lhs = PHI_RESULT (phi); 8767 value_range_t *lhs_vr = get_value_range (lhs); 8768 value_range_t vr_result = VR_INITIALIZER; 8769 bool first = true; 8770 int edges, old_edges; 8771 struct loop *l; 8772 8773 if (dump_file && (dump_flags & TDF_DETAILS)) 8774 { 8775 fprintf (dump_file, "\nVisiting PHI node: "); 8776 print_gimple_stmt (dump_file, phi, 0, dump_flags); 8777 } 8778 8779 edges = 0; 8780 for (i = 0; i < gimple_phi_num_args (phi); i++) 8781 { 8782 edge e = gimple_phi_arg_edge (phi, i); 8783 8784 if (dump_file && (dump_flags & TDF_DETAILS)) 8785 { 8786 fprintf (dump_file, 8787 " Argument #%d (%d -> %d %sexecutable)\n", 8788 (int) i, e->src->index, e->dest->index, 8789 (e->flags & EDGE_EXECUTABLE) ? "" : "not "); 8790 } 8791 8792 if (e->flags & EDGE_EXECUTABLE) 8793 { 8794 tree arg = PHI_ARG_DEF (phi, i); 8795 value_range_t vr_arg; 8796 8797 ++edges; 8798 8799 if (TREE_CODE (arg) == SSA_NAME) 8800 { 8801 vr_arg = *(get_value_range (arg)); 8802 /* Do not allow equivalences or symbolic ranges to leak in from 8803 backedges. That creates invalid equivalencies. 8804 See PR53465 and PR54767. */ 8805 if (e->flags & EDGE_DFS_BACK) 8806 { 8807 if (vr_arg.type == VR_RANGE 8808 || vr_arg.type == VR_ANTI_RANGE) 8809 { 8810 vr_arg.equiv = NULL; 8811 if (symbolic_range_p (&vr_arg)) 8812 { 8813 vr_arg.type = VR_VARYING; 8814 vr_arg.min = NULL_TREE; 8815 vr_arg.max = NULL_TREE; 8816 } 8817 } 8818 } 8819 else 8820 { 8821 /* If the non-backedge arguments range is VR_VARYING then 8822 we can still try recording a simple equivalence. */ 8823 if (vr_arg.type == VR_VARYING) 8824 { 8825 vr_arg.type = VR_RANGE; 8826 vr_arg.min = arg; 8827 vr_arg.max = arg; 8828 vr_arg.equiv = NULL; 8829 } 8830 } 8831 } 8832 else 8833 { 8834 if (TREE_OVERFLOW_P (arg)) 8835 arg = drop_tree_overflow (arg); 8836 8837 vr_arg.type = VR_RANGE; 8838 vr_arg.min = arg; 8839 vr_arg.max = arg; 8840 vr_arg.equiv = NULL; 8841 } 8842 8843 if (dump_file && (dump_flags & TDF_DETAILS)) 8844 { 8845 fprintf (dump_file, "\t"); 8846 print_generic_expr (dump_file, arg, dump_flags); 8847 fprintf (dump_file, ": "); 8848 dump_value_range (dump_file, &vr_arg); 8849 fprintf (dump_file, "\n"); 8850 } 8851 8852 if (first) 8853 copy_value_range (&vr_result, &vr_arg); 8854 else 8855 vrp_meet (&vr_result, &vr_arg); 8856 first = false; 8857 8858 if (vr_result.type == VR_VARYING) 8859 break; 8860 } 8861 } 8862 8863 if (vr_result.type == VR_VARYING) 8864 goto varying; 8865 else if (vr_result.type == VR_UNDEFINED) 8866 goto update_range; 8867 8868 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; 8869 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; 8870 8871 /* To prevent infinite iterations in the algorithm, derive ranges 8872 when the new value is slightly bigger or smaller than the 8873 previous one. We don't do this if we have seen a new executable 8874 edge; this helps us avoid an overflow infinity for conditionals 8875 which are not in a loop. If the old value-range was VR_UNDEFINED 8876 use the updated range and iterate one more time. */ 8877 if (edges > 0 8878 && gimple_phi_num_args (phi) > 1 8879 && edges == old_edges 8880 && lhs_vr->type != VR_UNDEFINED) 8881 { 8882 /* Compare old and new ranges, fall back to varying if the 8883 values are not comparable. */ 8884 int cmp_min = compare_values (lhs_vr->min, vr_result.min); 8885 if (cmp_min == -2) 8886 goto varying; 8887 int cmp_max = compare_values (lhs_vr->max, vr_result.max); 8888 if (cmp_max == -2) 8889 goto varying; 8890 8891 /* For non VR_RANGE or for pointers fall back to varying if 8892 the range changed. */ 8893 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE 8894 || POINTER_TYPE_P (TREE_TYPE (lhs))) 8895 && (cmp_min != 0 || cmp_max != 0)) 8896 goto varying; 8897 8898 /* If the new minimum is larger than than the previous one 8899 retain the old value. If the new minimum value is smaller 8900 than the previous one and not -INF go all the way to -INF + 1. 8901 In the first case, to avoid infinite bouncing between different 8902 minimums, and in the other case to avoid iterating millions of 8903 times to reach -INF. Going to -INF + 1 also lets the following 8904 iteration compute whether there will be any overflow, at the 8905 expense of one additional iteration. */ 8906 if (cmp_min < 0) 8907 vr_result.min = lhs_vr->min; 8908 else if (cmp_min > 0 8909 && !vrp_val_is_min (vr_result.min)) 8910 vr_result.min 8911 = int_const_binop (PLUS_EXPR, 8912 vrp_val_min (TREE_TYPE (vr_result.min)), 8913 build_int_cst (TREE_TYPE (vr_result.min), 1)); 8914 8915 /* Similarly for the maximum value. */ 8916 if (cmp_max > 0) 8917 vr_result.max = lhs_vr->max; 8918 else if (cmp_max < 0 8919 && !vrp_val_is_max (vr_result.max)) 8920 vr_result.max 8921 = int_const_binop (MINUS_EXPR, 8922 vrp_val_max (TREE_TYPE (vr_result.min)), 8923 build_int_cst (TREE_TYPE (vr_result.min), 1)); 8924 8925 /* If we dropped either bound to +-INF then if this is a loop 8926 PHI node SCEV may known more about its value-range. */ 8927 if ((cmp_min > 0 || cmp_min < 0 8928 || cmp_max < 0 || cmp_max > 0) 8929 && (l = loop_containing_stmt (phi)) 8930 && l->header == gimple_bb (phi)) 8931 adjust_range_with_scev (&vr_result, l, phi, lhs); 8932 8933 /* If we will end up with a (-INF, +INF) range, set it to 8934 VARYING. Same if the previous max value was invalid for 8935 the type and we end up with vr_result.min > vr_result.max. */ 8936 if ((vrp_val_is_max (vr_result.max) 8937 && vrp_val_is_min (vr_result.min)) 8938 || compare_values (vr_result.min, 8939 vr_result.max) > 0) 8940 goto varying; 8941 } 8942 8943 /* If the new range is different than the previous value, keep 8944 iterating. */ 8945update_range: 8946 if (update_value_range (lhs, &vr_result)) 8947 { 8948 if (dump_file && (dump_flags & TDF_DETAILS)) 8949 { 8950 fprintf (dump_file, "Found new range for "); 8951 print_generic_expr (dump_file, lhs, 0); 8952 fprintf (dump_file, ": "); 8953 dump_value_range (dump_file, &vr_result); 8954 fprintf (dump_file, "\n"); 8955 } 8956 8957 if (vr_result.type == VR_VARYING) 8958 return SSA_PROP_VARYING; 8959 8960 return SSA_PROP_INTERESTING; 8961 } 8962 8963 /* Nothing changed, don't add outgoing edges. */ 8964 return SSA_PROP_NOT_INTERESTING; 8965 8966 /* No match found. Set the LHS to VARYING. */ 8967varying: 8968 set_value_range_to_varying (lhs_vr); 8969 return SSA_PROP_VARYING; 8970} 8971 8972/* Simplify boolean operations if the source is known 8973 to be already a boolean. */ 8974static bool 8975simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 8976{ 8977 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 8978 tree lhs, op0, op1; 8979 bool need_conversion; 8980 8981 /* We handle only !=/== case here. */ 8982 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR); 8983 8984 op0 = gimple_assign_rhs1 (stmt); 8985 if (!op_with_boolean_value_range_p (op0)) 8986 return false; 8987 8988 op1 = gimple_assign_rhs2 (stmt); 8989 if (!op_with_boolean_value_range_p (op1)) 8990 return false; 8991 8992 /* Reduce number of cases to handle to NE_EXPR. As there is no 8993 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */ 8994 if (rhs_code == EQ_EXPR) 8995 { 8996 if (TREE_CODE (op1) == INTEGER_CST) 8997 op1 = int_const_binop (BIT_XOR_EXPR, op1, 8998 build_int_cst (TREE_TYPE (op1), 1)); 8999 else 9000 return false; 9001 } 9002 9003 lhs = gimple_assign_lhs (stmt); 9004 need_conversion 9005 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0)); 9006 9007 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */ 9008 if (need_conversion 9009 && !TYPE_UNSIGNED (TREE_TYPE (op0)) 9010 && TYPE_PRECISION (TREE_TYPE (op0)) == 1 9011 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1) 9012 return false; 9013 9014 /* For A != 0 we can substitute A itself. */ 9015 if (integer_zerop (op1)) 9016 gimple_assign_set_rhs_with_ops (gsi, 9017 need_conversion 9018 ? NOP_EXPR : TREE_CODE (op0), op0); 9019 /* For A != B we substitute A ^ B. Either with conversion. */ 9020 else if (need_conversion) 9021 { 9022 tree tem = make_ssa_name (TREE_TYPE (op0)); 9023 gassign *newop 9024 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1); 9025 gsi_insert_before (gsi, newop, GSI_SAME_STMT); 9026 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem); 9027 } 9028 /* Or without. */ 9029 else 9030 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1); 9031 update_stmt (gsi_stmt (*gsi)); 9032 9033 return true; 9034} 9035 9036/* Simplify a division or modulo operator to a right shift or 9037 bitwise and if the first operand is unsigned or is greater 9038 than zero and the second operand is an exact power of two. 9039 For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it 9040 into just op0 if op0's range is known to be a subset of 9041 [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned 9042 modulo. */ 9043 9044static bool 9045simplify_div_or_mod_using_ranges (gimple stmt) 9046{ 9047 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 9048 tree val = NULL; 9049 tree op0 = gimple_assign_rhs1 (stmt); 9050 tree op1 = gimple_assign_rhs2 (stmt); 9051 value_range_t *vr = get_value_range (op0); 9052 9053 if (rhs_code == TRUNC_MOD_EXPR 9054 && TREE_CODE (op1) == INTEGER_CST 9055 && tree_int_cst_sgn (op1) == 1 9056 && range_int_cst_p (vr) 9057 && tree_int_cst_lt (vr->max, op1)) 9058 { 9059 if (TYPE_UNSIGNED (TREE_TYPE (op0)) 9060 || tree_int_cst_sgn (vr->min) >= 0 9061 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1), 9062 vr->min)) 9063 { 9064 /* If op0 already has the range op0 % op1 has, 9065 then TRUNC_MOD_EXPR won't change anything. */ 9066 gimple_stmt_iterator gsi = gsi_for_stmt (stmt); 9067 gimple_assign_set_rhs_from_tree (&gsi, op0); 9068 update_stmt (stmt); 9069 return true; 9070 } 9071 } 9072 9073 if (!integer_pow2p (op1)) 9074 return false; 9075 9076 if (TYPE_UNSIGNED (TREE_TYPE (op0))) 9077 { 9078 val = integer_one_node; 9079 } 9080 else 9081 { 9082 bool sop = false; 9083 9084 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); 9085 9086 if (val 9087 && sop 9088 && integer_onep (val) 9089 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 9090 { 9091 location_t location; 9092 9093 if (!gimple_has_location (stmt)) 9094 location = input_location; 9095 else 9096 location = gimple_location (stmt); 9097 warning_at (location, OPT_Wstrict_overflow, 9098 "assuming signed overflow does not occur when " 9099 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>"); 9100 } 9101 } 9102 9103 if (val && integer_onep (val)) 9104 { 9105 tree t; 9106 9107 if (rhs_code == TRUNC_DIV_EXPR) 9108 { 9109 t = build_int_cst (integer_type_node, tree_log2 (op1)); 9110 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR); 9111 gimple_assign_set_rhs1 (stmt, op0); 9112 gimple_assign_set_rhs2 (stmt, t); 9113 } 9114 else 9115 { 9116 t = build_int_cst (TREE_TYPE (op1), 1); 9117 t = int_const_binop (MINUS_EXPR, op1, t); 9118 t = fold_convert (TREE_TYPE (op0), t); 9119 9120 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR); 9121 gimple_assign_set_rhs1 (stmt, op0); 9122 gimple_assign_set_rhs2 (stmt, t); 9123 } 9124 9125 update_stmt (stmt); 9126 return true; 9127 } 9128 9129 return false; 9130} 9131 9132/* If the operand to an ABS_EXPR is >= 0, then eliminate the 9133 ABS_EXPR. If the operand is <= 0, then simplify the 9134 ABS_EXPR into a NEGATE_EXPR. */ 9135 9136static bool 9137simplify_abs_using_ranges (gimple stmt) 9138{ 9139 tree val = NULL; 9140 tree op = gimple_assign_rhs1 (stmt); 9141 tree type = TREE_TYPE (op); 9142 value_range_t *vr = get_value_range (op); 9143 9144 if (TYPE_UNSIGNED (type)) 9145 { 9146 val = integer_zero_node; 9147 } 9148 else if (vr) 9149 { 9150 bool sop = false; 9151 9152 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop); 9153 if (!val) 9154 { 9155 sop = false; 9156 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, 9157 &sop); 9158 9159 if (val) 9160 { 9161 if (integer_zerop (val)) 9162 val = integer_one_node; 9163 else if (integer_onep (val)) 9164 val = integer_zero_node; 9165 } 9166 } 9167 9168 if (val 9169 && (integer_onep (val) || integer_zerop (val))) 9170 { 9171 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 9172 { 9173 location_t location; 9174 9175 if (!gimple_has_location (stmt)) 9176 location = input_location; 9177 else 9178 location = gimple_location (stmt); 9179 warning_at (location, OPT_Wstrict_overflow, 9180 "assuming signed overflow does not occur when " 9181 "simplifying %<abs (X)%> to %<X%> or %<-X%>"); 9182 } 9183 9184 gimple_assign_set_rhs1 (stmt, op); 9185 if (integer_onep (val)) 9186 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR); 9187 else 9188 gimple_assign_set_rhs_code (stmt, SSA_NAME); 9189 update_stmt (stmt); 9190 return true; 9191 } 9192 } 9193 9194 return false; 9195} 9196 9197/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR. 9198 If all the bits that are being cleared by & are already 9199 known to be zero from VR, or all the bits that are being 9200 set by | are already known to be one from VR, the bit 9201 operation is redundant. */ 9202 9203static bool 9204simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 9205{ 9206 tree op0 = gimple_assign_rhs1 (stmt); 9207 tree op1 = gimple_assign_rhs2 (stmt); 9208 tree op = NULL_TREE; 9209 value_range_t vr0 = VR_INITIALIZER; 9210 value_range_t vr1 = VR_INITIALIZER; 9211 wide_int may_be_nonzero0, may_be_nonzero1; 9212 wide_int must_be_nonzero0, must_be_nonzero1; 9213 wide_int mask; 9214 9215 if (TREE_CODE (op0) == SSA_NAME) 9216 vr0 = *(get_value_range (op0)); 9217 else if (is_gimple_min_invariant (op0)) 9218 set_value_range_to_value (&vr0, op0, NULL); 9219 else 9220 return false; 9221 9222 if (TREE_CODE (op1) == SSA_NAME) 9223 vr1 = *(get_value_range (op1)); 9224 else if (is_gimple_min_invariant (op1)) 9225 set_value_range_to_value (&vr1, op1, NULL); 9226 else 9227 return false; 9228 9229 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, 9230 &must_be_nonzero0)) 9231 return false; 9232 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, 9233 &must_be_nonzero1)) 9234 return false; 9235 9236 switch (gimple_assign_rhs_code (stmt)) 9237 { 9238 case BIT_AND_EXPR: 9239 mask = may_be_nonzero0.and_not (must_be_nonzero1); 9240 if (mask == 0) 9241 { 9242 op = op0; 9243 break; 9244 } 9245 mask = may_be_nonzero1.and_not (must_be_nonzero0); 9246 if (mask == 0) 9247 { 9248 op = op1; 9249 break; 9250 } 9251 break; 9252 case BIT_IOR_EXPR: 9253 mask = may_be_nonzero0.and_not (must_be_nonzero1); 9254 if (mask == 0) 9255 { 9256 op = op1; 9257 break; 9258 } 9259 mask = may_be_nonzero1.and_not (must_be_nonzero0); 9260 if (mask == 0) 9261 { 9262 op = op0; 9263 break; 9264 } 9265 break; 9266 default: 9267 gcc_unreachable (); 9268 } 9269 9270 if (op == NULL_TREE) 9271 return false; 9272 9273 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op); 9274 update_stmt (gsi_stmt (*gsi)); 9275 return true; 9276} 9277 9278/* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has 9279 a known value range VR. 9280 9281 If there is one and only one value which will satisfy the 9282 conditional, then return that value. Else return NULL. 9283 9284 If signed overflow must be undefined for the value to satisfy 9285 the conditional, then set *STRICT_OVERFLOW_P to true. */ 9286 9287static tree 9288test_for_singularity (enum tree_code cond_code, tree op0, 9289 tree op1, value_range_t *vr, 9290 bool *strict_overflow_p) 9291{ 9292 tree min = NULL; 9293 tree max = NULL; 9294 9295 /* Extract minimum/maximum values which satisfy the 9296 the conditional as it was written. */ 9297 if (cond_code == LE_EXPR || cond_code == LT_EXPR) 9298 { 9299 /* This should not be negative infinity; there is no overflow 9300 here. */ 9301 min = TYPE_MIN_VALUE (TREE_TYPE (op0)); 9302 9303 max = op1; 9304 if (cond_code == LT_EXPR && !is_overflow_infinity (max)) 9305 { 9306 tree one = build_int_cst (TREE_TYPE (op0), 1); 9307 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one); 9308 if (EXPR_P (max)) 9309 TREE_NO_WARNING (max) = 1; 9310 } 9311 } 9312 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) 9313 { 9314 /* This should not be positive infinity; there is no overflow 9315 here. */ 9316 max = TYPE_MAX_VALUE (TREE_TYPE (op0)); 9317 9318 min = op1; 9319 if (cond_code == GT_EXPR && !is_overflow_infinity (min)) 9320 { 9321 tree one = build_int_cst (TREE_TYPE (op0), 1); 9322 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one); 9323 if (EXPR_P (min)) 9324 TREE_NO_WARNING (min) = 1; 9325 } 9326 } 9327 9328 /* Now refine the minimum and maximum values using any 9329 value range information we have for op0. */ 9330 if (min && max) 9331 { 9332 if (compare_values (vr->min, min) == 1) 9333 min = vr->min; 9334 if (compare_values (vr->max, max) == -1) 9335 max = vr->max; 9336 9337 /* If the new min/max values have converged to a single value, 9338 then there is only one value which can satisfy the condition, 9339 return that value. */ 9340 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min)) 9341 { 9342 if ((cond_code == LE_EXPR || cond_code == LT_EXPR) 9343 && is_overflow_infinity (vr->max)) 9344 *strict_overflow_p = true; 9345 if ((cond_code == GE_EXPR || cond_code == GT_EXPR) 9346 && is_overflow_infinity (vr->min)) 9347 *strict_overflow_p = true; 9348 9349 return min; 9350 } 9351 } 9352 return NULL; 9353} 9354 9355/* Return whether the value range *VR fits in an integer type specified 9356 by PRECISION and UNSIGNED_P. */ 9357 9358static bool 9359range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn) 9360{ 9361 tree src_type; 9362 unsigned src_precision; 9363 widest_int tem; 9364 signop src_sgn; 9365 9366 /* We can only handle integral and pointer types. */ 9367 src_type = TREE_TYPE (vr->min); 9368 if (!INTEGRAL_TYPE_P (src_type) 9369 && !POINTER_TYPE_P (src_type)) 9370 return false; 9371 9372 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED, 9373 and so is an identity transform. */ 9374 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min)); 9375 src_sgn = TYPE_SIGN (src_type); 9376 if ((src_precision < dest_precision 9377 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED)) 9378 || (src_precision == dest_precision && src_sgn == dest_sgn)) 9379 return true; 9380 9381 /* Now we can only handle ranges with constant bounds. */ 9382 if (vr->type != VR_RANGE 9383 || TREE_CODE (vr->min) != INTEGER_CST 9384 || TREE_CODE (vr->max) != INTEGER_CST) 9385 return false; 9386 9387 /* For sign changes, the MSB of the wide_int has to be clear. 9388 An unsigned value with its MSB set cannot be represented by 9389 a signed wide_int, while a negative value cannot be represented 9390 by an unsigned wide_int. */ 9391 if (src_sgn != dest_sgn 9392 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0))) 9393 return false; 9394 9395 /* Then we can perform the conversion on both ends and compare 9396 the result for equality. */ 9397 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn); 9398 if (tem != wi::to_widest (vr->min)) 9399 return false; 9400 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn); 9401 if (tem != wi::to_widest (vr->max)) 9402 return false; 9403 9404 return true; 9405} 9406 9407/* Simplify a conditional using a relational operator to an equality 9408 test if the range information indicates only one value can satisfy 9409 the original conditional. */ 9410 9411static bool 9412simplify_cond_using_ranges (gcond *stmt) 9413{ 9414 tree op0 = gimple_cond_lhs (stmt); 9415 tree op1 = gimple_cond_rhs (stmt); 9416 enum tree_code cond_code = gimple_cond_code (stmt); 9417 9418 if (cond_code != NE_EXPR 9419 && cond_code != EQ_EXPR 9420 && TREE_CODE (op0) == SSA_NAME 9421 && INTEGRAL_TYPE_P (TREE_TYPE (op0)) 9422 && is_gimple_min_invariant (op1)) 9423 { 9424 value_range_t *vr = get_value_range (op0); 9425 9426 /* If we have range information for OP0, then we might be 9427 able to simplify this conditional. */ 9428 if (vr->type == VR_RANGE) 9429 { 9430 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; 9431 bool sop = false; 9432 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop); 9433 9434 if (new_tree 9435 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))) 9436 { 9437 if (dump_file) 9438 { 9439 fprintf (dump_file, "Simplified relational "); 9440 print_gimple_stmt (dump_file, stmt, 0, 0); 9441 fprintf (dump_file, " into "); 9442 } 9443 9444 gimple_cond_set_code (stmt, EQ_EXPR); 9445 gimple_cond_set_lhs (stmt, op0); 9446 gimple_cond_set_rhs (stmt, new_tree); 9447 9448 update_stmt (stmt); 9449 9450 if (dump_file) 9451 { 9452 print_gimple_stmt (dump_file, stmt, 0, 0); 9453 fprintf (dump_file, "\n"); 9454 } 9455 9456 if (sop && issue_strict_overflow_warning (wc)) 9457 { 9458 location_t location = input_location; 9459 if (gimple_has_location (stmt)) 9460 location = gimple_location (stmt); 9461 9462 warning_at (location, OPT_Wstrict_overflow, 9463 "assuming signed overflow does not occur when " 9464 "simplifying conditional"); 9465 } 9466 9467 return true; 9468 } 9469 9470 /* Try again after inverting the condition. We only deal 9471 with integral types here, so no need to worry about 9472 issues with inverting FP comparisons. */ 9473 sop = false; 9474 new_tree = test_for_singularity 9475 (invert_tree_comparison (cond_code, false), 9476 op0, op1, vr, &sop); 9477 9478 if (new_tree 9479 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))) 9480 { 9481 if (dump_file) 9482 { 9483 fprintf (dump_file, "Simplified relational "); 9484 print_gimple_stmt (dump_file, stmt, 0, 0); 9485 fprintf (dump_file, " into "); 9486 } 9487 9488 gimple_cond_set_code (stmt, NE_EXPR); 9489 gimple_cond_set_lhs (stmt, op0); 9490 gimple_cond_set_rhs (stmt, new_tree); 9491 9492 update_stmt (stmt); 9493 9494 if (dump_file) 9495 { 9496 print_gimple_stmt (dump_file, stmt, 0, 0); 9497 fprintf (dump_file, "\n"); 9498 } 9499 9500 if (sop && issue_strict_overflow_warning (wc)) 9501 { 9502 location_t location = input_location; 9503 if (gimple_has_location (stmt)) 9504 location = gimple_location (stmt); 9505 9506 warning_at (location, OPT_Wstrict_overflow, 9507 "assuming signed overflow does not occur when " 9508 "simplifying conditional"); 9509 } 9510 9511 return true; 9512 } 9513 } 9514 } 9515 9516 /* If we have a comparison of an SSA_NAME (OP0) against a constant, 9517 see if OP0 was set by a type conversion where the source of 9518 the conversion is another SSA_NAME with a range that fits 9519 into the range of OP0's type. 9520 9521 If so, the conversion is redundant as the earlier SSA_NAME can be 9522 used for the comparison directly if we just massage the constant in the 9523 comparison. */ 9524 if (TREE_CODE (op0) == SSA_NAME 9525 && TREE_CODE (op1) == INTEGER_CST) 9526 { 9527 gimple def_stmt = SSA_NAME_DEF_STMT (op0); 9528 tree innerop; 9529 9530 if (!is_gimple_assign (def_stmt) 9531 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) 9532 return false; 9533 9534 innerop = gimple_assign_rhs1 (def_stmt); 9535 9536 if (TREE_CODE (innerop) == SSA_NAME 9537 && !POINTER_TYPE_P (TREE_TYPE (innerop)) 9538 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)) 9539 { 9540 value_range_t *vr = get_value_range (innerop); 9541 9542 if (range_int_cst_p (vr) 9543 && range_fits_type_p (vr, 9544 TYPE_PRECISION (TREE_TYPE (op0)), 9545 TYPE_SIGN (TREE_TYPE (op0))) 9546 && int_fits_type_p (op1, TREE_TYPE (innerop)) 9547 /* The range must not have overflowed, or if it did overflow 9548 we must not be wrapping/trapping overflow and optimizing 9549 with strict overflow semantics. */ 9550 && ((!is_negative_overflow_infinity (vr->min) 9551 && !is_positive_overflow_infinity (vr->max)) 9552 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop)))) 9553 { 9554 /* If the range overflowed and the user has asked for warnings 9555 when strict overflow semantics were used to optimize code, 9556 issue an appropriate warning. */ 9557 if (cond_code != EQ_EXPR && cond_code != NE_EXPR 9558 && (is_negative_overflow_infinity (vr->min) 9559 || is_positive_overflow_infinity (vr->max)) 9560 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL)) 9561 { 9562 location_t location; 9563 9564 if (!gimple_has_location (stmt)) 9565 location = input_location; 9566 else 9567 location = gimple_location (stmt); 9568 warning_at (location, OPT_Wstrict_overflow, 9569 "assuming signed overflow does not occur when " 9570 "simplifying conditional"); 9571 } 9572 9573 tree newconst = fold_convert (TREE_TYPE (innerop), op1); 9574 gimple_cond_set_lhs (stmt, innerop); 9575 gimple_cond_set_rhs (stmt, newconst); 9576 return true; 9577 } 9578 } 9579 } 9580 9581 return false; 9582} 9583 9584/* Simplify a switch statement using the value range of the switch 9585 argument. */ 9586 9587static bool 9588simplify_switch_using_ranges (gswitch *stmt) 9589{ 9590 tree op = gimple_switch_index (stmt); 9591 value_range_t *vr; 9592 bool take_default; 9593 edge e; 9594 edge_iterator ei; 9595 size_t i = 0, j = 0, n, n2; 9596 tree vec2; 9597 switch_update su; 9598 size_t k = 1, l = 0; 9599 9600 if (TREE_CODE (op) == SSA_NAME) 9601 { 9602 vr = get_value_range (op); 9603 9604 /* We can only handle integer ranges. */ 9605 if ((vr->type != VR_RANGE 9606 && vr->type != VR_ANTI_RANGE) 9607 || symbolic_range_p (vr)) 9608 return false; 9609 9610 /* Find case label for min/max of the value range. */ 9611 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l); 9612 } 9613 else if (TREE_CODE (op) == INTEGER_CST) 9614 { 9615 take_default = !find_case_label_index (stmt, 1, op, &i); 9616 if (take_default) 9617 { 9618 i = 1; 9619 j = 0; 9620 } 9621 else 9622 { 9623 j = i; 9624 } 9625 } 9626 else 9627 return false; 9628 9629 n = gimple_switch_num_labels (stmt); 9630 9631 /* Bail out if this is just all edges taken. */ 9632 if (i == 1 9633 && j == n - 1 9634 && take_default) 9635 return false; 9636 9637 /* Build a new vector of taken case labels. */ 9638 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default); 9639 n2 = 0; 9640 9641 /* Add the default edge, if necessary. */ 9642 if (take_default) 9643 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt); 9644 9645 for (; i <= j; ++i, ++n2) 9646 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i); 9647 9648 for (; k <= l; ++k, ++n2) 9649 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k); 9650 9651 /* Mark needed edges. */ 9652 for (i = 0; i < n2; ++i) 9653 { 9654 e = find_edge (gimple_bb (stmt), 9655 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i)))); 9656 e->aux = (void *)-1; 9657 } 9658 9659 /* Queue not needed edges for later removal. */ 9660 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) 9661 { 9662 if (e->aux == (void *)-1) 9663 { 9664 e->aux = NULL; 9665 continue; 9666 } 9667 9668 if (dump_file && (dump_flags & TDF_DETAILS)) 9669 { 9670 fprintf (dump_file, "removing unreachable case label\n"); 9671 } 9672 to_remove_edges.safe_push (e); 9673 e->flags &= ~EDGE_EXECUTABLE; 9674 } 9675 9676 /* And queue an update for the stmt. */ 9677 su.stmt = stmt; 9678 su.vec = vec2; 9679 to_update_switch_stmts.safe_push (su); 9680 return false; 9681} 9682 9683/* Simplify an integral conversion from an SSA name in STMT. */ 9684 9685static bool 9686simplify_conversion_using_ranges (gimple stmt) 9687{ 9688 tree innerop, middleop, finaltype; 9689 gimple def_stmt; 9690 value_range_t *innervr; 9691 signop inner_sgn, middle_sgn, final_sgn; 9692 unsigned inner_prec, middle_prec, final_prec; 9693 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax; 9694 9695 finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); 9696 if (!INTEGRAL_TYPE_P (finaltype)) 9697 return false; 9698 middleop = gimple_assign_rhs1 (stmt); 9699 def_stmt = SSA_NAME_DEF_STMT (middleop); 9700 if (!is_gimple_assign (def_stmt) 9701 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) 9702 return false; 9703 innerop = gimple_assign_rhs1 (def_stmt); 9704 if (TREE_CODE (innerop) != SSA_NAME 9705 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)) 9706 return false; 9707 9708 /* Get the value-range of the inner operand. */ 9709 innervr = get_value_range (innerop); 9710 if (innervr->type != VR_RANGE 9711 || TREE_CODE (innervr->min) != INTEGER_CST 9712 || TREE_CODE (innervr->max) != INTEGER_CST) 9713 return false; 9714 9715 /* Simulate the conversion chain to check if the result is equal if 9716 the middle conversion is removed. */ 9717 innermin = wi::to_widest (innervr->min); 9718 innermax = wi::to_widest (innervr->max); 9719 9720 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); 9721 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); 9722 final_prec = TYPE_PRECISION (finaltype); 9723 9724 /* If the first conversion is not injective, the second must not 9725 be widening. */ 9726 if (wi::gtu_p (innermax - innermin, 9727 wi::mask <widest_int> (middle_prec, false)) 9728 && middle_prec < final_prec) 9729 return false; 9730 /* We also want a medium value so that we can track the effect that 9731 narrowing conversions with sign change have. */ 9732 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop)); 9733 if (inner_sgn == UNSIGNED) 9734 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false); 9735 else 9736 innermed = 0; 9737 if (wi::cmp (innermin, innermed, inner_sgn) >= 0 9738 || wi::cmp (innermed, innermax, inner_sgn) >= 0) 9739 innermed = innermin; 9740 9741 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop)); 9742 middlemin = wi::ext (innermin, middle_prec, middle_sgn); 9743 middlemed = wi::ext (innermed, middle_prec, middle_sgn); 9744 middlemax = wi::ext (innermax, middle_prec, middle_sgn); 9745 9746 /* Require that the final conversion applied to both the original 9747 and the intermediate range produces the same result. */ 9748 final_sgn = TYPE_SIGN (finaltype); 9749 if (wi::ext (middlemin, final_prec, final_sgn) 9750 != wi::ext (innermin, final_prec, final_sgn) 9751 || wi::ext (middlemed, final_prec, final_sgn) 9752 != wi::ext (innermed, final_prec, final_sgn) 9753 || wi::ext (middlemax, final_prec, final_sgn) 9754 != wi::ext (innermax, final_prec, final_sgn)) 9755 return false; 9756 9757 gimple_assign_set_rhs1 (stmt, innerop); 9758 update_stmt (stmt); 9759 return true; 9760} 9761 9762/* Simplify a conversion from integral SSA name to float in STMT. */ 9763 9764static bool 9765simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 9766{ 9767 tree rhs1 = gimple_assign_rhs1 (stmt); 9768 value_range_t *vr = get_value_range (rhs1); 9769 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))); 9770 machine_mode mode; 9771 tree tem; 9772 gassign *conv; 9773 9774 /* We can only handle constant ranges. */ 9775 if (vr->type != VR_RANGE 9776 || TREE_CODE (vr->min) != INTEGER_CST 9777 || TREE_CODE (vr->max) != INTEGER_CST) 9778 return false; 9779 9780 /* First check if we can use a signed type in place of an unsigned. */ 9781 if (TYPE_UNSIGNED (TREE_TYPE (rhs1)) 9782 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0) 9783 != CODE_FOR_nothing) 9784 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED)) 9785 mode = TYPE_MODE (TREE_TYPE (rhs1)); 9786 /* If we can do the conversion in the current input mode do nothing. */ 9787 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 9788 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing) 9789 return false; 9790 /* Otherwise search for a mode we can use, starting from the narrowest 9791 integer mode available. */ 9792 else 9793 { 9794 mode = GET_CLASS_NARROWEST_MODE (MODE_INT); 9795 do 9796 { 9797 /* If we cannot do a signed conversion to float from mode 9798 or if the value-range does not fit in the signed type 9799 try with a wider mode. */ 9800 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing 9801 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED)) 9802 break; 9803 9804 mode = GET_MODE_WIDER_MODE (mode); 9805 /* But do not widen the input. Instead leave that to the 9806 optabs expansion code. */ 9807 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1))) 9808 return false; 9809 } 9810 while (mode != VOIDmode); 9811 if (mode == VOIDmode) 9812 return false; 9813 } 9814 9815 /* It works, insert a truncation or sign-change before the 9816 float conversion. */ 9817 tem = make_ssa_name (build_nonstandard_integer_type 9818 (GET_MODE_PRECISION (mode), 0)); 9819 conv = gimple_build_assign (tem, NOP_EXPR, rhs1); 9820 gsi_insert_before (gsi, conv, GSI_SAME_STMT); 9821 gimple_assign_set_rhs1 (stmt, tem); 9822 update_stmt (stmt); 9823 9824 return true; 9825} 9826 9827/* Simplify an internal fn call using ranges if possible. */ 9828 9829static bool 9830simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 9831{ 9832 enum tree_code subcode; 9833 bool is_ubsan = false; 9834 bool ovf = false; 9835 switch (gimple_call_internal_fn (stmt)) 9836 { 9837 case IFN_UBSAN_CHECK_ADD: 9838 subcode = PLUS_EXPR; 9839 is_ubsan = true; 9840 break; 9841 case IFN_UBSAN_CHECK_SUB: 9842 subcode = MINUS_EXPR; 9843 is_ubsan = true; 9844 break; 9845 case IFN_UBSAN_CHECK_MUL: 9846 subcode = MULT_EXPR; 9847 is_ubsan = true; 9848 break; 9849 case IFN_ADD_OVERFLOW: 9850 subcode = PLUS_EXPR; 9851 break; 9852 case IFN_SUB_OVERFLOW: 9853 subcode = MINUS_EXPR; 9854 break; 9855 case IFN_MUL_OVERFLOW: 9856 subcode = MULT_EXPR; 9857 break; 9858 default: 9859 return false; 9860 } 9861 9862 tree op0 = gimple_call_arg (stmt, 0); 9863 tree op1 = gimple_call_arg (stmt, 1); 9864 tree type; 9865 if (is_ubsan) 9866 type = TREE_TYPE (op0); 9867 else if (gimple_call_lhs (stmt) == NULL_TREE) 9868 return false; 9869 else 9870 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt))); 9871 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf) 9872 || (is_ubsan && ovf)) 9873 return false; 9874 9875 gimple g; 9876 location_t loc = gimple_location (stmt); 9877 if (is_ubsan) 9878 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1); 9879 else 9880 { 9881 int prec = TYPE_PRECISION (type); 9882 tree utype = type; 9883 if (ovf 9884 || !useless_type_conversion_p (type, TREE_TYPE (op0)) 9885 || !useless_type_conversion_p (type, TREE_TYPE (op1))) 9886 utype = build_nonstandard_integer_type (prec, 1); 9887 if (TREE_CODE (op0) == INTEGER_CST) 9888 op0 = fold_convert (utype, op0); 9889 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0))) 9890 { 9891 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0); 9892 gimple_set_location (g, loc); 9893 gsi_insert_before (gsi, g, GSI_SAME_STMT); 9894 op0 = gimple_assign_lhs (g); 9895 } 9896 if (TREE_CODE (op1) == INTEGER_CST) 9897 op1 = fold_convert (utype, op1); 9898 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1))) 9899 { 9900 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1); 9901 gimple_set_location (g, loc); 9902 gsi_insert_before (gsi, g, GSI_SAME_STMT); 9903 op1 = gimple_assign_lhs (g); 9904 } 9905 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1); 9906 gimple_set_location (g, loc); 9907 gsi_insert_before (gsi, g, GSI_SAME_STMT); 9908 if (utype != type) 9909 { 9910 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, 9911 gimple_assign_lhs (g)); 9912 gimple_set_location (g, loc); 9913 gsi_insert_before (gsi, g, GSI_SAME_STMT); 9914 } 9915 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR, 9916 gimple_assign_lhs (g), 9917 build_int_cst (type, ovf)); 9918 } 9919 gimple_set_location (g, loc); 9920 gsi_replace (gsi, g, false); 9921 return true; 9922} 9923 9924/* Simplify STMT using ranges if possible. */ 9925 9926static bool 9927simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) 9928{ 9929 gimple stmt = gsi_stmt (*gsi); 9930 if (is_gimple_assign (stmt)) 9931 { 9932 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 9933 tree rhs1 = gimple_assign_rhs1 (stmt); 9934 9935 switch (rhs_code) 9936 { 9937 case EQ_EXPR: 9938 case NE_EXPR: 9939 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity 9940 if the RHS is zero or one, and the LHS are known to be boolean 9941 values. */ 9942 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9943 return simplify_truth_ops_using_ranges (gsi, stmt); 9944 break; 9945 9946 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR 9947 and BIT_AND_EXPR respectively if the first operand is greater 9948 than zero and the second operand is an exact power of two. 9949 Also optimize TRUNC_MOD_EXPR away if the second operand is 9950 constant and the first operand already has the right value 9951 range. */ 9952 case TRUNC_DIV_EXPR: 9953 case TRUNC_MOD_EXPR: 9954 if (TREE_CODE (rhs1) == SSA_NAME 9955 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9956 return simplify_div_or_mod_using_ranges (stmt); 9957 break; 9958 9959 /* Transform ABS (X) into X or -X as appropriate. */ 9960 case ABS_EXPR: 9961 if (TREE_CODE (rhs1) == SSA_NAME 9962 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9963 return simplify_abs_using_ranges (stmt); 9964 break; 9965 9966 case BIT_AND_EXPR: 9967 case BIT_IOR_EXPR: 9968 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR 9969 if all the bits being cleared are already cleared or 9970 all the bits being set are already set. */ 9971 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9972 return simplify_bit_ops_using_ranges (gsi, stmt); 9973 break; 9974 9975 CASE_CONVERT: 9976 if (TREE_CODE (rhs1) == SSA_NAME 9977 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9978 return simplify_conversion_using_ranges (stmt); 9979 break; 9980 9981 case FLOAT_EXPR: 9982 if (TREE_CODE (rhs1) == SSA_NAME 9983 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) 9984 return simplify_float_conversion_using_ranges (gsi, stmt); 9985 break; 9986 9987 default: 9988 break; 9989 } 9990 } 9991 else if (gimple_code (stmt) == GIMPLE_COND) 9992 return simplify_cond_using_ranges (as_a <gcond *> (stmt)); 9993 else if (gimple_code (stmt) == GIMPLE_SWITCH) 9994 return simplify_switch_using_ranges (as_a <gswitch *> (stmt)); 9995 else if (is_gimple_call (stmt) 9996 && gimple_call_internal_p (stmt)) 9997 return simplify_internal_call_using_ranges (gsi, stmt); 9998 9999 return false; 10000} 10001 10002/* If the statement pointed by SI has a predicate whose value can be 10003 computed using the value range information computed by VRP, compute 10004 its value and return true. Otherwise, return false. */ 10005 10006static bool 10007fold_predicate_in (gimple_stmt_iterator *si) 10008{ 10009 bool assignment_p = false; 10010 tree val; 10011 gimple stmt = gsi_stmt (*si); 10012 10013 if (is_gimple_assign (stmt) 10014 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) 10015 { 10016 assignment_p = true; 10017 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt), 10018 gimple_assign_rhs1 (stmt), 10019 gimple_assign_rhs2 (stmt), 10020 stmt); 10021 } 10022 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) 10023 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt), 10024 gimple_cond_lhs (cond_stmt), 10025 gimple_cond_rhs (cond_stmt), 10026 stmt); 10027 else 10028 return false; 10029 10030 if (val) 10031 { 10032 if (assignment_p) 10033 val = fold_convert (gimple_expr_type (stmt), val); 10034 10035 if (dump_file) 10036 { 10037 fprintf (dump_file, "Folding predicate "); 10038 print_gimple_expr (dump_file, stmt, 0, 0); 10039 fprintf (dump_file, " to "); 10040 print_generic_expr (dump_file, val, 0); 10041 fprintf (dump_file, "\n"); 10042 } 10043 10044 if (is_gimple_assign (stmt)) 10045 gimple_assign_set_rhs_from_tree (si, val); 10046 else 10047 { 10048 gcc_assert (gimple_code (stmt) == GIMPLE_COND); 10049 gcond *cond_stmt = as_a <gcond *> (stmt); 10050 if (integer_zerop (val)) 10051 gimple_cond_make_false (cond_stmt); 10052 else if (integer_onep (val)) 10053 gimple_cond_make_true (cond_stmt); 10054 else 10055 gcc_unreachable (); 10056 } 10057 10058 return true; 10059 } 10060 10061 return false; 10062} 10063 10064/* Callback for substitute_and_fold folding the stmt at *SI. */ 10065 10066static bool 10067vrp_fold_stmt (gimple_stmt_iterator *si) 10068{ 10069 if (fold_predicate_in (si)) 10070 return true; 10071 10072 return simplify_stmt_using_ranges (si); 10073} 10074 10075/* Stack of dest,src equivalency pairs that need to be restored after 10076 each attempt to thread a block's incoming edge to an outgoing edge. 10077 10078 A NULL entry is used to mark the end of pairs which need to be 10079 restored. */ 10080static vec<tree> equiv_stack; 10081 10082/* A trivial wrapper so that we can present the generic jump threading 10083 code with a simple API for simplifying statements. STMT is the 10084 statement we want to simplify, WITHIN_STMT provides the location 10085 for any overflow warnings. */ 10086 10087static tree 10088simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt) 10089{ 10090 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) 10091 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt), 10092 gimple_cond_lhs (cond_stmt), 10093 gimple_cond_rhs (cond_stmt), 10094 within_stmt); 10095 10096 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt)) 10097 { 10098 value_range_t new_vr = VR_INITIALIZER; 10099 tree lhs = gimple_assign_lhs (assign_stmt); 10100 10101 if (TREE_CODE (lhs) == SSA_NAME 10102 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 10103 || POINTER_TYPE_P (TREE_TYPE (lhs)))) 10104 { 10105 extract_range_from_assignment (&new_vr, assign_stmt); 10106 if (range_int_cst_singleton_p (&new_vr)) 10107 return new_vr.min; 10108 } 10109 } 10110 10111 return NULL_TREE; 10112} 10113 10114/* Blocks which have more than one predecessor and more than 10115 one successor present jump threading opportunities, i.e., 10116 when the block is reached from a specific predecessor, we 10117 may be able to determine which of the outgoing edges will 10118 be traversed. When this optimization applies, we are able 10119 to avoid conditionals at runtime and we may expose secondary 10120 optimization opportunities. 10121 10122 This routine is effectively a driver for the generic jump 10123 threading code. It basically just presents the generic code 10124 with edges that may be suitable for jump threading. 10125 10126 Unlike DOM, we do not iterate VRP if jump threading was successful. 10127 While iterating may expose new opportunities for VRP, it is expected 10128 those opportunities would be very limited and the compile time cost 10129 to expose those opportunities would be significant. 10130 10131 As jump threading opportunities are discovered, they are registered 10132 for later realization. */ 10133 10134static void 10135identify_jump_threads (void) 10136{ 10137 basic_block bb; 10138 gcond *dummy; 10139 int i; 10140 edge e; 10141 10142 /* Ugh. When substituting values earlier in this pass we can 10143 wipe the dominance information. So rebuild the dominator 10144 information as we need it within the jump threading code. */ 10145 calculate_dominance_info (CDI_DOMINATORS); 10146 10147 /* We do not allow VRP information to be used for jump threading 10148 across a back edge in the CFG. Otherwise it becomes too 10149 difficult to avoid eliminating loop exit tests. Of course 10150 EDGE_DFS_BACK is not accurate at this time so we have to 10151 recompute it. */ 10152 mark_dfs_back_edges (); 10153 10154 /* Do not thread across edges we are about to remove. Just marking 10155 them as EDGE_DFS_BACK will do. */ 10156 FOR_EACH_VEC_ELT (to_remove_edges, i, e) 10157 e->flags |= EDGE_DFS_BACK; 10158 10159 /* Allocate our unwinder stack to unwind any temporary equivalences 10160 that might be recorded. */ 10161 equiv_stack.create (20); 10162 10163 /* To avoid lots of silly node creation, we create a single 10164 conditional and just modify it in-place when attempting to 10165 thread jumps. */ 10166 dummy = gimple_build_cond (EQ_EXPR, 10167 integer_zero_node, integer_zero_node, 10168 NULL, NULL); 10169 10170 /* Walk through all the blocks finding those which present a 10171 potential jump threading opportunity. We could set this up 10172 as a dominator walker and record data during the walk, but 10173 I doubt it's worth the effort for the classes of jump 10174 threading opportunities we are trying to identify at this 10175 point in compilation. */ 10176 FOR_EACH_BB_FN (bb, cfun) 10177 { 10178 gimple last; 10179 10180 /* If the generic jump threading code does not find this block 10181 interesting, then there is nothing to do. */ 10182 if (! potentially_threadable_block (bb)) 10183 continue; 10184 10185 last = last_stmt (bb); 10186 10187 /* We're basically looking for a switch or any kind of conditional with 10188 integral or pointer type arguments. Note the type of the second 10189 argument will be the same as the first argument, so no need to 10190 check it explicitly. 10191 10192 We also handle the case where there are no statements in the 10193 block. This come up with forwarder blocks that are not 10194 optimized away because they lead to a loop header. But we do 10195 want to thread through them as we can sometimes thread to the 10196 loop exit which is obviously profitable. */ 10197 if (!last 10198 || gimple_code (last) == GIMPLE_SWITCH 10199 || (gimple_code (last) == GIMPLE_COND 10200 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME 10201 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))) 10202 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))) 10203 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME 10204 || is_gimple_min_invariant (gimple_cond_rhs (last))))) 10205 { 10206 edge_iterator ei; 10207 10208 /* We've got a block with multiple predecessors and multiple 10209 successors which also ends in a suitable conditional or 10210 switch statement. For each predecessor, see if we can thread 10211 it to a specific successor. */ 10212 FOR_EACH_EDGE (e, ei, bb->preds) 10213 { 10214 /* Do not thread across back edges or abnormal edges 10215 in the CFG. */ 10216 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX)) 10217 continue; 10218 10219 thread_across_edge (dummy, e, true, &equiv_stack, 10220 simplify_stmt_for_jump_threading); 10221 } 10222 } 10223 } 10224 10225 /* We do not actually update the CFG or SSA graphs at this point as 10226 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet 10227 handle ASSERT_EXPRs gracefully. */ 10228} 10229 10230/* We identified all the jump threading opportunities earlier, but could 10231 not transform the CFG at that time. This routine transforms the 10232 CFG and arranges for the dominator tree to be rebuilt if necessary. 10233 10234 Note the SSA graph update will occur during the normal TODO 10235 processing by the pass manager. */ 10236static void 10237finalize_jump_threads (void) 10238{ 10239 thread_through_all_blocks (false); 10240 equiv_stack.release (); 10241} 10242 10243 10244/* Traverse all the blocks folding conditionals with known ranges. */ 10245 10246static void 10247vrp_finalize (void) 10248{ 10249 size_t i; 10250 10251 values_propagated = true; 10252 10253 if (dump_file) 10254 { 10255 fprintf (dump_file, "\nValue ranges after VRP:\n\n"); 10256 dump_all_value_ranges (dump_file); 10257 fprintf (dump_file, "\n"); 10258 } 10259 10260 substitute_and_fold (op_with_constant_singleton_value_range, 10261 vrp_fold_stmt, false); 10262 10263 if (warn_array_bounds && first_pass_instance) 10264 check_all_array_refs (); 10265 10266 /* We must identify jump threading opportunities before we release 10267 the datastructures built by VRP. */ 10268 identify_jump_threads (); 10269 10270 /* Set value range to non pointer SSA_NAMEs. */ 10271 for (i = 0; i < num_vr_values; i++) 10272 if (vr_value[i]) 10273 { 10274 tree name = ssa_name (i); 10275 10276 if (!name 10277 || POINTER_TYPE_P (TREE_TYPE (name)) 10278 || (vr_value[i]->type == VR_VARYING) 10279 || (vr_value[i]->type == VR_UNDEFINED)) 10280 continue; 10281 10282 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST) 10283 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST) 10284 && (vr_value[i]->type == VR_RANGE 10285 || vr_value[i]->type == VR_ANTI_RANGE)) 10286 set_range_info (name, vr_value[i]->type, vr_value[i]->min, 10287 vr_value[i]->max); 10288 } 10289 10290 /* Free allocated memory. */ 10291 for (i = 0; i < num_vr_values; i++) 10292 if (vr_value[i]) 10293 { 10294 BITMAP_FREE (vr_value[i]->equiv); 10295 free (vr_value[i]); 10296 } 10297 10298 free (vr_value); 10299 free (vr_phi_edge_counts); 10300 10301 /* So that we can distinguish between VRP data being available 10302 and not available. */ 10303 vr_value = NULL; 10304 vr_phi_edge_counts = NULL; 10305} 10306 10307 10308/* Main entry point to VRP (Value Range Propagation). This pass is 10309 loosely based on J. R. C. Patterson, ``Accurate Static Branch 10310 Prediction by Value Range Propagation,'' in SIGPLAN Conference on 10311 Programming Language Design and Implementation, pp. 67-78, 1995. 10312 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html 10313 10314 This is essentially an SSA-CCP pass modified to deal with ranges 10315 instead of constants. 10316 10317 While propagating ranges, we may find that two or more SSA name 10318 have equivalent, though distinct ranges. For instance, 10319 10320 1 x_9 = p_3->a; 10321 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0> 10322 3 if (p_4 == q_2) 10323 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>; 10324 5 endif 10325 6 if (q_2) 10326 10327 In the code above, pointer p_5 has range [q_2, q_2], but from the 10328 code we can also determine that p_5 cannot be NULL and, if q_2 had 10329 a non-varying range, p_5's range should also be compatible with it. 10330 10331 These equivalences are created by two expressions: ASSERT_EXPR and 10332 copy operations. Since p_5 is an assertion on p_4, and p_4 was the 10333 result of another assertion, then we can use the fact that p_5 and 10334 p_4 are equivalent when evaluating p_5's range. 10335 10336 Together with value ranges, we also propagate these equivalences 10337 between names so that we can take advantage of information from 10338 multiple ranges when doing final replacement. Note that this 10339 equivalency relation is transitive but not symmetric. 10340 10341 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we 10342 cannot assert that q_2 is equivalent to p_5 because q_2 may be used 10343 in contexts where that assertion does not hold (e.g., in line 6). 10344 10345 TODO, the main difference between this pass and Patterson's is that 10346 we do not propagate edge probabilities. We only compute whether 10347 edges can be taken or not. That is, instead of having a spectrum 10348 of jump probabilities between 0 and 1, we only deal with 0, 1 and 10349 DON'T KNOW. In the future, it may be worthwhile to propagate 10350 probabilities to aid branch prediction. */ 10351 10352static unsigned int 10353execute_vrp (void) 10354{ 10355 int i; 10356 edge e; 10357 switch_update *su; 10358 10359 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); 10360 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); 10361 scev_initialize (); 10362 10363 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation. 10364 Inserting assertions may split edges which will invalidate 10365 EDGE_DFS_BACK. */ 10366 insert_range_assertions (); 10367 10368 to_remove_edges.create (10); 10369 to_update_switch_stmts.create (5); 10370 threadedge_initialize_values (); 10371 10372 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */ 10373 mark_dfs_back_edges (); 10374 10375 vrp_initialize (); 10376 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node); 10377 vrp_finalize (); 10378 10379 free_numbers_of_iterations_estimates (); 10380 10381 /* ASSERT_EXPRs must be removed before finalizing jump threads 10382 as finalizing jump threads calls the CFG cleanup code which 10383 does not properly handle ASSERT_EXPRs. */ 10384 remove_range_assertions (); 10385 10386 /* If we exposed any new variables, go ahead and put them into 10387 SSA form now, before we handle jump threading. This simplifies 10388 interactions between rewriting of _DECL nodes into SSA form 10389 and rewriting SSA_NAME nodes into SSA form after block 10390 duplication and CFG manipulation. */ 10391 update_ssa (TODO_update_ssa); 10392 10393 finalize_jump_threads (); 10394 10395 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the 10396 CFG in a broken state and requires a cfg_cleanup run. */ 10397 FOR_EACH_VEC_ELT (to_remove_edges, i, e) 10398 remove_edge (e); 10399 /* Update SWITCH_EXPR case label vector. */ 10400 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su) 10401 { 10402 size_t j; 10403 size_t n = TREE_VEC_LENGTH (su->vec); 10404 tree label; 10405 gimple_switch_set_num_labels (su->stmt, n); 10406 for (j = 0; j < n; j++) 10407 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j)); 10408 /* As we may have replaced the default label with a regular one 10409 make sure to make it a real default label again. This ensures 10410 optimal expansion. */ 10411 label = gimple_switch_label (su->stmt, 0); 10412 CASE_LOW (label) = NULL_TREE; 10413 CASE_HIGH (label) = NULL_TREE; 10414 } 10415 10416 if (to_remove_edges.length () > 0) 10417 { 10418 free_dominance_info (CDI_DOMINATORS); 10419 loops_state_set (LOOPS_NEED_FIXUP); 10420 } 10421 10422 to_remove_edges.release (); 10423 to_update_switch_stmts.release (); 10424 threadedge_finalize_values (); 10425 10426 scev_finalize (); 10427 loop_optimizer_finalize (); 10428 return 0; 10429} 10430 10431namespace { 10432 10433const pass_data pass_data_vrp = 10434{ 10435 GIMPLE_PASS, /* type */ 10436 "vrp", /* name */ 10437 OPTGROUP_NONE, /* optinfo_flags */ 10438 TV_TREE_VRP, /* tv_id */ 10439 PROP_ssa, /* properties_required */ 10440 0, /* properties_provided */ 10441 0, /* properties_destroyed */ 10442 0, /* todo_flags_start */ 10443 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */ 10444}; 10445 10446class pass_vrp : public gimple_opt_pass 10447{ 10448public: 10449 pass_vrp (gcc::context *ctxt) 10450 : gimple_opt_pass (pass_data_vrp, ctxt) 10451 {} 10452 10453 /* opt_pass methods: */ 10454 opt_pass * clone () { return new pass_vrp (m_ctxt); } 10455 virtual bool gate (function *) { return flag_tree_vrp != 0; } 10456 virtual unsigned int execute (function *) { return execute_vrp (); } 10457 10458}; // class pass_vrp 10459 10460} // anon namespace 10461 10462gimple_opt_pass * 10463make_pass_vrp (gcc::context *ctxt) 10464{ 10465 return new pass_vrp (ctxt); 10466} 10467