/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "real.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
VEC(tree,heap) *ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
    VEC_safe_grow_cleared (tree, heap, ssa_name_values,
			   SSA_NAME_VERSION (name) + 1);
  VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
}
/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (ssa_name_values == NULL);
  ssa_name_values = VEC_alloc (tree, heap, num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  VEC_free (tree, heap, ssa_name_values);
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */
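
/* For instance (purely illustrative names), given an assertion
   created earlier by VRP:

	x_2 = ASSERT_EXPR <x_1, x_1 > 5>

   a query for x_1 at a block dominated by that statement returns
   x_2, whose recorded range is tighter than that of x_1.  */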

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */
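
/* As a sketch of the unwind stack (illustrative only), suppose the
   caller pushed a NULL marker and we then recorded x_1 = 5 followed
   by y_2 = z_3.  The stack would hold, top to bottom:

	y_2			<- popped first
	previous value of y_2
	x_1
	previous value of x_1
	NULL			<- unwinding stops after popping this

   Each <name, previous value> pair is popped and the name's previous
   value restored; without a NULL marker the whole stack drains.  */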

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
	break;

      prev_value = VEC_pop (tree, *stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */
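
/* For example (block numbers and names invented), given a PHI

	x_3 = PHI <7(2), x_1(3)>

   at the start of E->dest, traversing E from block 2 lets us record
   the temporary equivalence x_3 = 7, which may later allow the
   conditional ending E->dest to be resolved for this traversal.  */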

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (is_gimple_reg (dst))
	stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */
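
/* For instance (a made-up GIMPLE assignment), after copy propagation
   has replaced both operands with constants,

	x_1 = 3 + 4;

   has a GIMPLE_BINARY_RHS that fold_binary reduces to the constant 7,
   whereas an RHS with unresolved operands simply yields NULL_TREE.  */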

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      {
        tree rhs = gimple_assign_rhs1 (stmt);

        if (TREE_CODE (rhs) == COND_EXPR)
          {
            /* Sadly, we have to handle conditional assignments specially
               here, because fold expects all the operands of an expression
               to be folded before the expression itself is folded, but we
               can't just substitute the folded condition here.  */
            tree cond = fold (COND_EXPR_COND (rhs));
            if (cond == boolean_true_node)
              rhs = COND_EXPR_THEN (rhs);
            else if (cond == boolean_false_node)
              rhs = COND_EXPR_ELSE (rhs);
          }

        return fold (rhs);
      }
      break;
    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }
      break;
    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }
      break;
    default:
      gcc_unreachable ();
    }
}
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
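
/* As a sketch with invented names: if traversing E gives a_1 the
   value 0 and E->dest contains

	b_2 = a_1 + 1;
	if (b_2 != 1) ...

   then temporarily substituting 0 for a_1 folds the assignment to
   the invariant 1, we record b_2 = 1, and the conditional can later
   be resolved to false for paths through E.  */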

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						  VEC(tree, heap) **stack,
						  tree (*simplify) (gimple,
								    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (i.e., they depend on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node. Temporarily using only one edge produces invalid
	 results. For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes. If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of this block.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  if (is_gimple_call (stmt))
	    cached_lhs = fold_call_stmt (stmt, false);
	  else
	    cached_lhs = fold_assignment_stmt (stmt);

	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */
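
/* Illustration with invented names: if E->dest ends in

	if (a_5 > 10)

   and traversing E recorded the equivalence a_5 = 12, then the dummy
   conditional is loaded with 12 > 10, which folds to an invariant
   true, so the outgoing edge taken via E is known.  */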

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gimple dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op0);
	  if (tmp)
	    op0 = tmp;
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op1);
	  if (tmp)
	    op1 = tmp;
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
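      /* E.g., a condition written as 10 < a_3 (invented names) is
	 swapped into the canonical form a_3 > 10 before we try to
	 fold or look it up.  */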
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
	 we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
	  && TREE_CODE (cached_lhs) == SSA_NAME
	  && SSA_NAME_VALUE (cached_lhs))
	cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used as scratch by condition
   simplification, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with the left-hand sides of the ASSERT_EXPRs they
   are used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */
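
/* Schematically (block numbers invented), jump threading transforms

	B2: if (x_1 > 0) goto B3; else goto B4;
	B3: if (x_1 > 0) goto B5; else goto B6;

   by noting that when B3 is entered from B2's true edge, its
   conditional must also be true; the edge B2->B3 can then be
   redirected (through a duplicate of B3) straight to B5.  */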

void
thread_across_edge (gimple dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    VEC(tree, heap) **stack,
		    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      gimple last = gsi_stmt (gsi_last_bb (e->dest));

      FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
	{
	  tree use = USE_FROM_PTR (use_p);

	  if (TREE_CODE (use) == SSA_NAME
	      && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
	      && gimple_bb (SSA_NAME_DEF_STMT (use)) == e->dest)
	    goto fail;
	}
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR, SWITCH_EXPR or computed GOTO, see
     if we know which arm will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  if (dest == e->dest)
	    goto fail;

	  remove_temporary_equivalences (stack);
	  register_jump_thread (e, taken_edge);
	}
    }

 fail:
  remove_temporary_equivalences (stack);
}