/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-loop.h"
#include "builtins.h"
#include "cfg.h"
#include "cfganal.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.   We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
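
/* An illustrative sketch (block and SSA names hypothetical) of the shape
   accepted above: a block with multiple predecessors and multiple
   successors ending in a conditional, e.g.

       <bb>:
       x_3 = PHI <0(2), 1(3)>
       if (x_3 == 0)
	 goto <bb A>;
       else
	 goto <bb B>;

   Reached via the edge from bb 2, the PHI gives x_3 the value 0, the
   condition is statically true, and the incoming edge can be threaded
   directly to A.  */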

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
	break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
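
/* An illustrative sketch of the unwind stack layout assumed by the two
   routines above (top of stack shown first, names hypothetical).
   Entries are pushed in pairs (previous value, then name); NULL_TREE
   markers are pushed separately by thread_across_edge to bound how far
   a later unwind may proceed:

       a_1                  <- SSA_NAME whose value was changed
       prev value of a_1    <- what a_1 was equivalent to before
       NULL_TREE            <- marker: unwinding stops here
       [ ... older entries ... ]

   Popping a_1 and then its previous value restores the table entry.  */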

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
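
/* For example (an illustrative sketch, names hypothetical), with

       <E->dest>:
       x_2 = PHI <7(E), y_5(other)>

   traversing E lets us record the temporary equivalence x_2 == 7, which
   may later allow the conditional ending E->dest to be simplified to a
   constant on this path.  */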

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

	/* Sadly, we have to handle conditional assignments specially
	   here, because fold expects all the operands of an expression
	   to be folded before the expression itself is folded, but we
	   can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	  op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.   This includes invalidating
   LHS and any objects that are currently equivalent to LHS.

   Finding the objects that are currently marked as equivalent to LHS
   is a bit tricky.  We could walk the ssa names and see if any have
   SSA_NAME_VALUE that is the same as LHS.  That's expensive.

   However, it's far more efficient to look at the unwinding stack as
   that will have all context sensitive equivalences which are the only
   ones that we really have to worry about here.   */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack)
{

  /* The stack is an unwinding stack.  If the current element is NULL
     then it's a "stop unwinding" marker.  Else the current marker is
     the SSA_NAME with an equivalence and the prior entry in the stack
     is what the current element is equivalent to.  */
  for (int i = stack->length() - 1; i >= 0; i--)
    {
      /* Ignore the stop unwinding markers.  */
      if ((*stack)[i] == NULL)
	continue;

      /* We want to check the current value of stack[i] to see if
	 it matches LHS.  If so, then invalidate.  */
      if (SSA_NAME_VALUE ((*stack)[i]) == lhs)
	record_temporary_equivalence ((*stack)[i], NULL_TREE, stack);

      /* Remember, we're dealing with two elements in this case.  */
      i--;
    }

  /* And invalidate any known value for LHS itself.  */
  if (SSA_NAME_VALUE (lhs))
    record_temporary_equivalence (lhs, NULL_TREE, stack);
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						  vec<tree> *stack,
						  tree (*simplify) (gimple,
								    gimple),
						  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	{
	  /* STMT might still have DEFS and we need to invalidate any known
	     equivalences for them.

	     Consider if STMT is a GIMPLE_ASM with one or more outputs that
	     feeds a conditional inside a loop.  We might derive an equivalence
	     due to the conditional.  */
	  tree op;
	  ssa_op_iter iter;

	  if (backedge_seen)
	    FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
	      invalidate_equivalences (op, stack);

	  continue;
	}

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node. Temporarily using only one edge produces invalid
	 results. For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes. If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    {
	      if (backedge_seen)
		{
		  tree lhs = gimple_get_lhs (stmt);
		  invalidate_equivalences (lhs, stack);
		}
	      continue;
	    }
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
	  else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.

	 If we have traversed a backedge at some point during threading,
	 then always enter something here.  Either a real equivalence,
	 or a NULL_TREE equivalence which is effectively invalidation of
	 prior equivalences.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
	invalidate_equivalences (gimple_get_lhs (stmt), stack);
    }
  return stmt;
}

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gcond *dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.   Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path,
			    bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	      if (*backedge_seen_p)
		simplify = dummy_simplify;
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path,
						 backedge_seen_p);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
	simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path,
				  backedge_seen_p);
      return true;
    }

  return false;
}
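
/* An illustrative sketch (block numbers and names hypothetical) of the
   blocks the routine above threads through: bb 5 is an empty forwarder
   and bb 6 contains nothing but a conditional whose operand is already
   known on the path, so their outgoing edges are appended to the path as
   EDGE_NO_COPY_SRC_BLOCK and neither block needs to be duplicated:

       <bb 5>:
       goto <bb 6>;

       <bb 6>:
       if (flag_2 != 0)   // flag_2 has a known value on this path
	 goto <bb 7>;
       else
	 goto <bb 8>;  */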

/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
		      vec<basic_block, va_gc> *&path,
		      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
	if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
	  {
	    vec_safe_push (path, start_bb);
	    return true;
	  }
    }

  return false;
}
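
/* A rough sketch of how the helper above is used by the routine that
   follows: fsm_find_control_statement_thread_paths walks backwards
   through PHI definitions starting from the block containing the control
   statement; whenever the defining block of the traced SSA_NAME differs
   from the last block already on the path, the helper searches, within a
   single loop, for the connecting chain of blocks and pushes them onto
   PATH with END_BB pushed first.  */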

static int max_threaded_paths;

/* We trace the value of the variable EXPR back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_THREADED_PATHS jump threading paths.  */

static void
fsm_find_control_statement_thread_paths (tree expr,
					 hash_set<basic_block> *visited_bbs,
					 vec<basic_block, va_gc> *&path,
					 bool seen_loop_phi)
{
  tree var = SSA_NAME_VAR (expr);
  gimple def_stmt = SSA_NAME_DEF_STMT (expr);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var == NULL || var_bb == NULL)
    return;

  /* For the moment we assume that an SSA chain only contains phi nodes, and
     eventually one of the phi arguments will be an integer constant.  In the
     future, this could be extended to also handle simple assignments of
     arithmetic operations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  gphi *phi = as_a <gphi *> (def_stmt);
  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (phi)->header == gimple_bb (phi))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
	return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, n_basic_blocks_for_fn (cfun));

      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
	{
	  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

	  if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
				    e->src->loop_father))
	    ++e_count;

	  delete visited_bbs;

	  /* If there is more than one path, stop.  */
	  if (e_count > 1)
	    {
	      vec_free (next_path);
	      return;
	    }
	}

      /* Stop if we have not found a path: this could occur when the recursion
	 is stopped by one of the bounds.  */
      if (e_count == 0)
	{
	  vec_free (next_path);
	  return;
	}

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
	continue;

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  vec_safe_push (path, bbi);
	  /* Recursively follow SSA_NAMEs looking for a constant definition.  */
	  fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
						   seen_loop_phi);

	  path->pop ();
	  continue;
	}

      if (TREE_CODE (arg) != INTEGER_CST)
	continue;

      int path_length = path->length ();
      /* A path with less than 2 basic blocks should not be jump-threaded.  */
      if (path_length < 2)
	continue;

      if (path_length > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the number of basic blocks on the path "
		     "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
	  continue;
	}

      if (max_threaded_paths <= 0)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the number of previously recorded FSM paths to thread "
		     "exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
	  continue;
	}

      /* Add BBI to the path.  */
      vec_safe_push (path, bbi);
      ++path_length;

      int n_insns = 0;
      gimple_stmt_iterator gsi;
      int j;
      loop_p loop = (*path)[0]->loop_father;
      bool path_crosses_loops = false;

      /* Count the number of instructions on the path: as these instructions
	 will have to be duplicated, we will not record the path if there are
	 too many instructions on the path.  Also check that all the blocks in
	 the path belong to a single loop.  */
      for (j = 1; j < path_length - 1; j++)
	{
	  basic_block bb = (*path)[j];

	  if (bb->loop_father != loop)
	    {
	      path_crosses_loops = true;
	      break;
	    }

	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      /* Do not count empty statements and labels.  */
	      if (gimple_code (stmt) != GIMPLE_NOP
		  && gimple_code (stmt) != GIMPLE_LABEL
		  && !is_gimple_debug (stmt))
		++n_insns;
	    }
	}

      if (path_crosses_loops)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the path crosses loops.\n");
	  path->pop ();
	  continue;
	}

      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the number of instructions on the path "
		     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
	  path->pop ();
	  continue;
	}

      vec<jump_thread_edge *> *jump_thread_path
	= new vec<jump_thread_edge *> ();

      /* Record the edges between the blocks in PATH.  */
      for (j = 0; j < path_length - 1; j++)
	{
	  edge e = find_edge ((*path)[path_length - j - 1],
			      (*path)[path_length - j - 2]);
	  gcc_assert (e);
	  jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
	  jump_thread_path->safe_push (x);
	}

      /* Add the edge taken when the control variable has value ARG.  */
      edge taken_edge = find_taken_edge ((*path)[0], arg);
      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      jump_thread_path->safe_push (x);

      register_jump_thread (jump_thread_path);
      --max_threaded_paths;

      /* Remove BBI from the path.  */
      path->pop ();
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path.
   A negative return value indicates the block should not be duplicated
   and thus is not suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     bool handle_dominating_asserts,
			     vec<tree> *stack,
			     tree (*simplify) (gimple, gimple),
			     vec<jump_thread_edge *> *path,
			     bitmap visited,
			     bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
							*backedge_seen_p);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
	 does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
	  && !gsi_end_p (gsi_start_phis (e->dest)))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     the first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	      *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);
	  *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	  if (*backedge_seen_p)
	    simplify = dummy_simplify;

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      handle_dominating_asserts,
				      simplify,
				      visited,
				      path,
				      backedge_seen_p);
	  return 1;
	}

      if (!flag_expensive_optimizations
	  || optimize_function_for_size_p (cfun)
	  || TREE_CODE (cond) != SSA_NAME
	  || e->dest->loop_father != e->src->loop_father
	  || loop_depth (e->dest->loop_father) == 0)
	return 0;

      /* When COND cannot be simplified, try to find paths from a control
	 statement back through the PHI nodes which would affect that control
	 statement.  */
      vec<basic_block, va_gc> *bb_path;
      vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
      vec_safe_push (bb_path, e->dest);
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
      fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
					       false);

      delete visited_bbs;
      vec_free (bb_path);
    }
  return 0;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    vec<tree> *stack,
		    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
					      handle_dominating_asserts,
					      stack, simplify, path,
					      visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
	 thus there should be no edges on the thread path and no need to walk
	 through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
	 duplicate.  Just quit now rather than trying to use the block as
	 a joiner in a jump threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
	{
	  BITMAP_FREE (visited);
	  remove_temporary_equivalences (stack);
	  return;
	}
    }

 /* We were unable to determine what out edge from E->dest is taken.  However,
    we might still be able to thread through successors of E->dest.  This
    often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  remove_temporary_equivalences (stack);
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	stack->safe_push (NULL_TREE);

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = false;
	backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
	backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	if (backedge_seen)
	  simplify = dummy_simplify;
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    handle_dominating_asserts,
					    simplify,
					    visited,
					    path,
					    &backedge_seen);

	if (backedge_seen)
	  simplify = dummy_simplify;

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       handle_dominating_asserts,
					       stack, simplify, path, visited,
					       &backedge_seen) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    delete_jump_thread_path (path);
	  }

	/* And unwind the equivalence table.  */
	remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}
