1/* Control flow optimization code for GNU compiler.
2   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 2, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING.  If not, write to the Free
19Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
21
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
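
/* As a minimal, hypothetical sketch of edge forwarding: a forwarder block
   containing nothing but an unconditional jump,

	L1:	goto L2;
	L2:	...

   has every edge entering L1 redirected straight to L2, after which L1 is
   removed as unreachable.  The labels are illustrative only.  */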
33
34#include "config.h"
35#include "system.h"
36#include "coretypes.h"
37#include "tm.h"
38#include "rtl.h"
39#include "hard-reg-set.h"
40#include "regs.h"
41#include "timevar.h"
42#include "output.h"
43#include "insn-config.h"
44#include "flags.h"
45#include "recog.h"
46#include "toplev.h"
47#include "cselib.h"
48#include "params.h"
49#include "tm_p.h"
50#include "target.h"
51#include "cfglayout.h"
52#include "emit-rtl.h"
53#include "tree-pass.h"
54#include "cfgloop.h"
55#include "expr.h"
56
57#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
58
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
60static bool first_pass;
61static bool try_crossjump_to_edge (int, edge, edge);
62static bool try_crossjump_bb (int, basic_block);
63static bool outgoing_edges_match (int, basic_block, basic_block);
64static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
65static bool insns_match_p (int, rtx, rtx);
66
67static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
68static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
69static bool try_optimize_cfg (int);
70static bool try_simplify_condjump (basic_block);
71static bool try_forward_edges (int, basic_block);
72static edge thread_jump (int, edge, basic_block);
73static bool mark_effect (rtx, bitmap);
74static void notice_new_block (basic_block);
75static void update_forwarder_flag (basic_block);
76static int mentions_nonequal_regs (rtx *, void *);
77static void merge_memattrs (rtx, rtx);
78
/* Set flags for a newly created block.  */
80
81static void
82notice_new_block (basic_block bb)
83{
84  if (!bb)
85    return;
86
87  if (forwarder_block_p (bb))
88    bb->flags |= BB_FORWARDER_BLOCK;
89}
90
/* Recompute the forwarder flag after a block has been modified.  */
92
93static void
94update_forwarder_flag (basic_block bb)
95{
96  if (forwarder_block_p (bb))
97    bb->flags |= BB_FORWARDER_BLOCK;
98  else
99    bb->flags &= ~BB_FORWARDER_BLOCK;
100}
101
102/* Simplify a conditional jump around an unconditional jump.
103   Return true if something changed.  */
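
/* A minimal, hypothetical sketch of the case handled here (labels are
   illustrative only).  Before:

	if (test) goto L3;
	goto L4;
     L3: ...

   After: the forwarder block holding "goto L4" is deleted and the
   conditional branch is inverted to "if (! test) goto L4;", which now
   falls through to L3.  */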
104
105static bool
106try_simplify_condjump (basic_block cbranch_block)
107{
108  basic_block jump_block, jump_dest_block, cbranch_dest_block;
109  edge cbranch_jump_edge, cbranch_fallthru_edge;
110  rtx cbranch_insn;
111
112  /* Verify that there are exactly two successors.  */
113  if (EDGE_COUNT (cbranch_block->succs) != 2)
114    return false;
115
116  /* Verify that we've got a normal conditional branch at the end
117     of the block.  */
118  cbranch_insn = BB_END (cbranch_block);
119  if (!any_condjump_p (cbranch_insn))
120    return false;
121
122  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
123  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
124
125  /* The next block must not have multiple predecessors, must not
126     be the last block in the function, and must contain just the
127     unconditional jump.  */
128  jump_block = cbranch_fallthru_edge->dest;
129  if (!single_pred_p (jump_block)
130      || jump_block->next_bb == EXIT_BLOCK_PTR
131      || !FORWARDER_BLOCK_P (jump_block))
132    return false;
133  jump_dest_block = single_succ (jump_block);
134
135  /* If we are partitioning hot/cold basic blocks, we don't want to
136     mess up unconditional or indirect jumps that cross between hot
137     and cold sections.
138
139     Basic block partitioning may result in some jumps that appear to
140     be optimizable (or blocks that appear to be mergeable), but which really
141     must be left untouched (they are required to make it safely across
142     partition boundaries).  See the comments at the top of
143     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
144
145  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
146      || (cbranch_jump_edge->flags & EDGE_CROSSING))
147    return false;
148
149  /* The conditional branch must target the block after the
150     unconditional branch.  */
151  cbranch_dest_block = cbranch_jump_edge->dest;
152
153  if (cbranch_dest_block == EXIT_BLOCK_PTR
154      || !can_fallthru (jump_block, cbranch_dest_block))
155    return false;
156
157  /* Invert the conditional branch.  */
158  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
159    return false;
160
161  if (dump_file)
162    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
163	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
164
165  /* Success.  Update the CFG to match.  Note that after this point
166     the edge variable names appear backwards; the redirection is done
167     this way to preserve edge profile data.  */
168  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
169						cbranch_dest_block);
170  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
171						    jump_dest_block);
172  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
173  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
174  update_br_prob_note (cbranch_block);
175
176  /* Delete the block with the unconditional jump, and clean up the mess.  */
177  delete_basic_block (jump_block);
178  tidy_fallthru_edge (cbranch_jump_edge);
179  update_forwarder_flag (cbranch_block);
180
181  return true;
182}
183
/* Attempt to prove that the operation is a NOOP using CSElib; otherwise mark
   the effect on the register.  Used by jump threading.  */
186
187static bool
188mark_effect (rtx exp, regset nonequal)
189{
190  int regno;
191  rtx dest;
192  switch (GET_CODE (exp))
193    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
196    case CLOBBER:
197      if (REG_P (XEXP (exp, 0)))
198	{
199	  dest = XEXP (exp, 0);
200	  regno = REGNO (dest);
201	  CLEAR_REGNO_REG_SET (nonequal, regno);
202	  if (regno < FIRST_PSEUDO_REGISTER)
203	    {
204	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
205	      while (--n > 0)
206		CLEAR_REGNO_REG_SET (nonequal, regno + n);
207	    }
208	}
209      return false;
210
211    case SET:
212      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
213	return false;
214      dest = SET_DEST (exp);
215      if (dest == pc_rtx)
216	return false;
217      if (!REG_P (dest))
218	return true;
219      regno = REGNO (dest);
220      SET_REGNO_REG_SET (nonequal, regno);
221      if (regno < FIRST_PSEUDO_REGISTER)
222	{
223	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
224	  while (--n > 0)
225	    SET_REGNO_REG_SET (nonequal, regno + n);
226	}
227      return false;
228
229    default:
230      return false;
231    }
232}
233
234/* Return nonzero if X is a register set in regset DATA.
235   Called via for_each_rtx.  */
236static int
237mentions_nonequal_regs (rtx *x, void *data)
238{
239  regset nonequal = (regset) data;
240  if (REG_P (*x))
241    {
242      int regno;
243
244      regno = REGNO (*x);
245      if (REGNO_REG_SET_P (nonequal, regno))
246	return 1;
247      if (regno < FIRST_PSEUDO_REGISTER)
248	{
249	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
250	  while (--n > 0)
251	    if (REGNO_REG_SET_P (nonequal, regno + n))
252	      return 1;
253	}
254    }
255  return 0;
256}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, otherwise return NULL.  */
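
/* For instance (a purely hypothetical sketch), if E is the taken edge of a
   block ending in "if (a > 0) goto B" and block B only tests
   "if (a >= 0) ...", the outcome of B's comparison is already decided on
   that edge, so E can be threaded past B directly to the successor B would
   take.  */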
260
261static edge
262thread_jump (int mode, edge e, basic_block b)
263{
264  rtx set1, set2, cond1, cond2, insn;
265  enum rtx_code code1, code2, reversed_code2;
266  bool reverse1 = false;
267  unsigned i;
268  regset nonequal;
269  bool failed = false;
270  reg_set_iterator rsi;
271
272  if (b->flags & BB_NONTHREADABLE_BLOCK)
273    return NULL;
274
  /* At the moment we handle only conditional jumps, but later we may want
     to extend this code to tablejumps and others.  */
277  if (EDGE_COUNT (e->src->succs) != 2)
278    return NULL;
279  if (EDGE_COUNT (b->succs) != 2)
280    {
281      b->flags |= BB_NONTHREADABLE_BLOCK;
282      return NULL;
283    }
284
285  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
286  if (!any_condjump_p (BB_END (e->src)))
287    return NULL;
288
289  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
290    {
291      b->flags |= BB_NONTHREADABLE_BLOCK;
292      return NULL;
293    }
294
295  set1 = pc_set (BB_END (e->src));
296  set2 = pc_set (BB_END (b));
297  if (((e->flags & EDGE_FALLTHRU) != 0)
298      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
299    reverse1 = true;
300
301  cond1 = XEXP (SET_SRC (set1), 0);
302  cond2 = XEXP (SET_SRC (set2), 0);
303  if (reverse1)
304    code1 = reversed_comparison_code (cond1, BB_END (e->src));
305  else
306    code1 = GET_CODE (cond1);
307
308  code2 = GET_CODE (cond2);
309  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
310
311  if (!comparison_dominates_p (code1, code2)
312      && !comparison_dominates_p (code1, reversed_code2))
313    return NULL;
314
  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example interval comparisons that
     dominate even when the operands are not equivalent.  */
319  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
320      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
321    return NULL;
322
323  /* Short circuit cases where block B contains some side effects, as we can't
324     safely bypass it.  */
325  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
326       insn = NEXT_INSN (insn))
327    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
328      {
329	b->flags |= BB_NONTHREADABLE_BLOCK;
330	return NULL;
331      }
332
333  cselib_init (false);
334
335  /* First process all values computed in the source basic block.  */
336  for (insn = NEXT_INSN (BB_HEAD (e->src));
337       insn != NEXT_INSN (BB_END (e->src));
338       insn = NEXT_INSN (insn))
339    if (INSN_P (insn))
340      cselib_process_insn (insn);
341
342  nonequal = BITMAP_ALLOC (NULL);
343  CLEAR_REG_SET (nonequal);
344
  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */
348
349  for (insn = NEXT_INSN (BB_HEAD (b));
350       insn != NEXT_INSN (BB_END (b)) && !failed;
351       insn = NEXT_INSN (insn))
352    {
353      if (INSN_P (insn))
354	{
355	  rtx pat = PATTERN (insn);
356
357	  if (GET_CODE (pat) == PARALLEL)
358	    {
359	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
360		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
361	    }
362	  else
363	    failed |= mark_effect (pat, nonequal);
364	}
365
366      cselib_process_insn (insn);
367    }
368
  /* Later we should clear nonequal of dead registers.  So far we don't
     have liveness information in cfg_cleanup.  */
371  if (failed)
372    {
373      b->flags |= BB_NONTHREADABLE_BLOCK;
374      goto failed_exit;
375    }
376
  /* cond2 must not mention any register that is not known to be equal to
     its value in the former block.  */
379  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
380    goto failed_exit;
381
382  /* In case liveness information is available, we need to prove equivalence
383     only of the live values.  */
384  if (mode & CLEANUP_UPDATE_LIFE)
385    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);
386
387  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
388    goto failed_exit;
389
390  BITMAP_FREE (nonequal);
391  cselib_finish ();
392  if ((comparison_dominates_p (code1, code2) != 0)
393      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
394    return BRANCH_EDGE (b);
395  else
396    return FALLTHRU_EDGE (b);
397
398failed_exit:
399  BITMAP_FREE (nonequal);
400  cselib_finish ();
401  return NULL;
402}
403
404/* Attempt to forward edges leaving basic block B.
405   Return true if successful.  */
406
407static bool
408try_forward_edges (int mode, basic_block b)
409{
410  bool changed = false;
411  edge_iterator ei;
412  edge e, *threaded_edges = NULL;
413
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
423
424  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
425    return false;
426
427  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
428    {
429      basic_block target, first;
430      int counter;
431      bool threaded = false;
432      int nthreaded_edges = 0;
433      bool may_thread = first_pass | (b->flags & BB_DIRTY);
434
      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding the
         fallthru edge to the same place as the branch edge of a conditional
         branch and turn the conditional branch into an unconditional one.  */
440      if (e->flags & EDGE_COMPLEX)
441	{
442	  ei_next (&ei);
443	  continue;
444	}
445
446      target = first = e->dest;
447      counter = 0;
448
449      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
450	 up jumps that cross between hot/cold sections.
451
452	 Basic block partitioning may result in some jumps that appear
453	 to be optimizable (or blocks that appear to be mergeable), but which
454	 really must be left untouched (they are required to make it safely
455	 across partition boundaries).  See the comments at the top of
456	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
457	 details.  */
458
459      if (first != EXIT_BLOCK_PTR
460	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
461	return false;
462
463      while (counter < n_basic_blocks)
464	{
465	  basic_block new_target = NULL;
466	  bool new_target_threaded = false;
467	  may_thread |= target->flags & BB_DIRTY;
468
469	  if (FORWARDER_BLOCK_P (target)
470  	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
471	      && single_succ (target) != EXIT_BLOCK_PTR)
472	    {
473	      /* Bypass trivial infinite loops.  */
474	      new_target = single_succ (target);
475	      if (target == new_target)
476		counter = n_basic_blocks;
477	    }
478
	  /* Allow threading over only one edge at a time to simplify updating
	     of probabilities.  */
481	  else if ((mode & CLEANUP_THREADING) && may_thread)
482	    {
483	      edge t = thread_jump (mode, e, target);
484	      if (t)
485		{
486		  if (!threaded_edges)
487		    threaded_edges = xmalloc (sizeof (*threaded_edges)
488					      * n_basic_blocks);
489		  else
490		    {
491		      int i;
492
493		      /* Detect an infinite loop across blocks not
494			 including the start block.  */
495		      for (i = 0; i < nthreaded_edges; ++i)
496			if (threaded_edges[i] == t)
497			  break;
498		      if (i < nthreaded_edges)
499			{
500			  counter = n_basic_blocks;
501			  break;
502			}
503		    }
504
505		  /* Detect an infinite loop across the start block.  */
506		  if (t->dest == b)
507		    break;
508
509		  gcc_assert (nthreaded_edges < n_basic_blocks);
510		  threaded_edges[nthreaded_edges++] = t;
511
512		  new_target = t->dest;
513		  new_target_threaded = true;
514		}
515	    }
516
517	  if (!new_target)
518	    break;
519
	  /* Avoid killing loop pre-headers, as they are the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
526	  if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
527	    {
528	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
529			  ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
530
531	      if (!NOTE_P (insn))
532		insn = NEXT_INSN (insn);
533
534	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
535		   insn = NEXT_INSN (insn))
536		if (NOTE_P (insn)
537		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
538		  break;
539
540	      if (insn && NOTE_P (insn))
541		break;
542
543	      /* Do not clean up branches to just past the end of a loop
544		 at this time; it can mess up the loop optimizer's
545		 recognition of some patterns.  */
546
547	      insn = PREV_INSN (BB_HEAD (target));
548	      if (insn && NOTE_P (insn)
549		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
550		break;
551	    }
552
553	  counter++;
554	  target = new_target;
555	  threaded |= new_target_threaded;
556	}
557
558      if (counter >= n_basic_blocks)
559	{
560	  if (dump_file)
561	    fprintf (dump_file, "Infinite loop in BB %i.\n",
562		     target->index);
563	}
564      else if (target == first)
565	; /* We didn't do anything.  */
566      else
567	{
568	  /* Save the values now, as the edge may get removed.  */
569	  gcov_type edge_count = e->count;
570	  int edge_probability = e->probability;
571	  int edge_frequency;
572	  int n = 0;
573
574	  /* Don't force if target is exit block.  */
575	  if (threaded && target != EXIT_BLOCK_PTR)
576	    {
577	      notice_new_block (redirect_edge_and_branch_force (e, target));
578	      if (dump_file)
579		fprintf (dump_file, "Conditionals threaded.\n");
580	    }
581	  else if (!redirect_edge_and_branch (e, target))
582	    {
583	      if (dump_file)
584		fprintf (dump_file,
585			 "Forwarding edge %i->%i to %i failed.\n",
586			 b->index, e->dest->index, target->index);
587	      ei_next (&ei);
588	      continue;
589	    }
590
591	  /* We successfully forwarded the edge.  Now update profile
592	     data: for each edge we traversed in the chain, remove
593	     the original edge's execution count.  */
594	  edge_frequency = ((edge_probability * b->frequency
595			     + REG_BR_PROB_BASE / 2)
596			    / REG_BR_PROB_BASE);
597
598	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
599	    b->flags |= BB_FORWARDER_BLOCK;
600
601	  do
602	    {
603	      edge t;
604
605	      if (!single_succ_p (first))
606		{
607		  gcc_assert (n < nthreaded_edges);
608		  t = threaded_edges [n++];
609		  gcc_assert (t->src == first);
610		  update_bb_profile_for_threading (first, edge_frequency,
611						   edge_count, t);
612		  update_br_prob_note (first);
613		}
614	      else
615		{
616		  first->count -= edge_count;
617		  if (first->count < 0)
618		    first->count = 0;
619		  first->frequency -= edge_frequency;
620		  if (first->frequency < 0)
621		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed the edge, as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
626		  if (n < nthreaded_edges
627		      && first == threaded_edges [n]->src)
628		    n++;
629		  t = single_succ_edge (first);
630		}
631
632	      t->count -= edge_count;
633	      if (t->count < 0)
634		t->count = 0;
635	      first = t->dest;
636	    }
637	  while (first != target);
638
639	  changed = true;
640	  continue;
641	}
642      ei_next (&ei);
643    }
644
645  if (threaded_edges)
646    free (threaded_edges);
647  return changed;
648}
649
650
651/* Blocks A and B are to be merged into a single block.  A has no incoming
652   fallthru edge, so it can be moved before B without adding or modifying
653   any jumps (aside from the jump from A to B).  */
654
655static void
656merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
657{
658  rtx barrier;
659  bool only_notes;
660
661  /* If we are partitioning hot/cold basic blocks, we don't want to
662     mess up unconditional or indirect jumps that cross between hot
663     and cold sections.
664
665     Basic block partitioning may result in some jumps that appear to
666     be optimizable (or blocks that appear to be mergeable), but which really
667     must be left untouched (they are required to make it safely across
668     partition boundaries).  See the comments at the top of
669     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
670
671  if (BB_PARTITION (a) != BB_PARTITION (b))
672    return;
673
674  barrier = next_nonnote_insn (BB_END (a));
675  gcc_assert (BARRIER_P (barrier));
676  delete_insn (barrier);
677
678  /* Move block and loop notes out of the chain so that we do not
679     disturb their order.
680
681     ??? A better solution would be to squeeze out all the non-nested notes
682     and adjust the block trees appropriately.   Even better would be to have
683     a tighter connection between block trees and rtl so that this is not
684     necessary.  */
685  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
686  gcc_assert (!only_notes);
687
688  /* Scramble the insn chain.  */
689  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
690    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
691  a->flags |= BB_DIRTY;
692
693  if (dump_file)
694    fprintf (dump_file, "Moved block %d before %d and merged.\n",
695	     a->index, b->index);
696
697  /* Swap the records for the two blocks around.  */
698
699  unlink_block (a);
700  link_block (a, b->prev_bb);
701
702  /* Now blocks A and B are contiguous.  Merge them.  */
703  merge_blocks (a, b);
704}
705
706/* Blocks A and B are to be merged into a single block.  B has no outgoing
707   fallthru edge, so it can be moved after A without adding or modifying
708   any jumps (aside from the jump from A to B).  */
709
710static void
711merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
712{
713  rtx barrier, real_b_end;
714  rtx label, table;
715  bool only_notes;
716
717  /* If we are partitioning hot/cold basic blocks, we don't want to
718     mess up unconditional or indirect jumps that cross between hot
719     and cold sections.
720
721     Basic block partitioning may result in some jumps that appear to
722     be optimizable (or blocks that appear to be mergeable), but which really
723     must be left untouched (they are required to make it safely across
724     partition boundaries).  See the comments at the top of
725     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
726
727  if (BB_PARTITION (a) != BB_PARTITION (b))
728    return;
729
730  real_b_end = BB_END (b);
731
  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
734  if (tablejump_p (BB_END (b), &label, &table)
735      && prev_active_insn (label) == BB_END (b))
736    {
737      BB_END (b) = table;
738    }
739
740  /* There had better have been a barrier there.  Delete it.  */
741  barrier = NEXT_INSN (BB_END (b));
742  if (barrier && BARRIER_P (barrier))
743    delete_insn (barrier);
744
745  /* Move block and loop notes out of the chain so that we do not
746     disturb their order.
747
748     ??? A better solution would be to squeeze out all the non-nested notes
749     and adjust the block trees appropriately.   Even better would be to have
750     a tighter connection between block trees and rtl so that this is not
751     necessary.  */
752  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
753  gcc_assert (!only_notes);
754
755
756  /* Scramble the insn chain.  */
757  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
758
759  /* Restore the real end of b.  */
760  BB_END (b) = real_b_end;
761
762  if (dump_file)
763    fprintf (dump_file, "Moved block %d after %d and merged.\n",
764	     b->index, a->index);
765
766  /* Now blocks A and B are contiguous.  Merge them.  */
767  merge_blocks (a, b);
768}
769
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   that C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
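
/* As a rough, hypothetical illustration of the cases handled below: if B
   falls through into C, the two blocks are simply merged in place.
   Otherwise, when expensive transformations are allowed, either C is moved
   to sit right after B (when C has no outgoing fallthru edge), or B is
   moved to sit right before C (when B has no incoming fallthru edge, or
   after forcing that edge to become a jump).  */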
781
782static basic_block
783merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
784{
785  basic_block next;
786
787  /* If we are partitioning hot/cold basic blocks, we don't want to
788     mess up unconditional or indirect jumps that cross between hot
789     and cold sections.
790
791     Basic block partitioning may result in some jumps that appear to
792     be optimizable (or blocks that appear to be mergeable), but which really
793     must be left untouched (they are required to make it safely across
794     partition boundaries).  See the comments at the top of
795     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
796
797  if (BB_PARTITION (b) != BB_PARTITION (c))
798    return NULL;
799
800
801
802  /* If B has a fallthru edge to C, no need to move anything.  */
803  if (e->flags & EDGE_FALLTHRU)
804    {
805      int b_index = b->index, c_index = c->index;
806      merge_blocks (b, c);
807      update_forwarder_flag (b);
808
809      if (dump_file)
810	fprintf (dump_file, "Merged %d and %d without moving.\n",
811		 b_index, c_index);
812
813      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
814    }
815
816  /* Otherwise we will need to move code around.  Do that only if expensive
817     transformations are allowed.  */
818  else if (mode & CLEANUP_EXPENSIVE)
819    {
820      edge tmp_edge, b_fallthru_edge;
821      bool c_has_outgoing_fallthru;
822      bool b_has_incoming_fallthru;
823      edge_iterator ei;
824
825      /* Avoid overactive code motion, as the forwarder blocks should be
826         eliminated by edge redirection instead.  One exception might have
827	 been if B is a forwarder block and C has no fallthru edge, but
828	 that should be cleaned up by bb-reorder instead.  */
829      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
830	return NULL;
831
832      /* We must make sure to not munge nesting of lexical blocks,
833	 and loop notes.  This is done by squeezing out all the notes
834	 and leaving them there to lie.  Not ideal, but functional.  */
835
836      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
837	if (tmp_edge->flags & EDGE_FALLTHRU)
838	  break;
839
840      c_has_outgoing_fallthru = (tmp_edge != NULL);
841
842      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
843	if (tmp_edge->flags & EDGE_FALLTHRU)
844	  break;
845
846      b_has_incoming_fallthru = (tmp_edge != NULL);
847      b_fallthru_edge = tmp_edge;
848      next = b->prev_bb;
849      if (next == c)
850	next = next->prev_bb;
851
852      /* Otherwise, we're going to try to move C after B.  If C does
853	 not have an outgoing fallthru, then it can be moved
854	 immediately after B without introducing or modifying jumps.  */
855      if (! c_has_outgoing_fallthru)
856	{
857	  merge_blocks_move_successor_nojumps (b, c);
858          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
859	}
860
861      /* If B does not have an incoming fallthru, then it can be moved
862	 immediately before C without introducing or modifying jumps.
863	 C cannot be the first block, so we do not have to worry about
864	 accessing a non-existent block.  */
865
866      if (b_has_incoming_fallthru)
867	{
868	  basic_block bb;
869
870	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
871	    return NULL;
872	  bb = force_nonfallthru (b_fallthru_edge);
873	  if (bb)
874	    notice_new_block (bb);
875	}
876
877      merge_blocks_move_predecessor_nojumps (b, c);
878      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
879    }
880
881  return NULL;
882}
883
884
/* Remove the memory attributes of MEM expressions X and Y
   where they are not equal.  */
887
888void
889merge_memattrs (rtx x, rtx y)
890{
891  int i;
892  int j;
893  enum rtx_code code;
894  const char *fmt;
895
896  if (x == y)
897    return;
898  if (x == 0 || y == 0)
899    return;
900
901  code = GET_CODE (x);
902
903  if (code != GET_CODE (y))
904    return;
905
906  if (GET_MODE (x) != GET_MODE (y))
907    return;
908
909  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
910    {
911      if (! MEM_ATTRS (x))
912	MEM_ATTRS (y) = 0;
913      else if (! MEM_ATTRS (y))
914	MEM_ATTRS (x) = 0;
915      else
916	{
917	  rtx mem_size;
918
919	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
920	    {
921	      set_mem_alias_set (x, 0);
922	      set_mem_alias_set (y, 0);
923	    }
924
925	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
926	    {
927	      set_mem_expr (x, 0);
928	      set_mem_expr (y, 0);
929	      set_mem_offset (x, 0);
930	      set_mem_offset (y, 0);
931	    }
932	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
933	    {
934	      set_mem_offset (x, 0);
935	      set_mem_offset (y, 0);
936	    }
937
938	  if (!MEM_SIZE (x))
939	    mem_size = NULL_RTX;
940	  else if (!MEM_SIZE (y))
941	    mem_size = NULL_RTX;
942	  else
943	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
944				     INTVAL (MEM_SIZE (y))));
945	  set_mem_size (x, mem_size);
946	  set_mem_size (y, mem_size);
947
948	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
949	  set_mem_align (y, MEM_ALIGN (x));
950	}
951    }
952
953  fmt = GET_RTX_FORMAT (code);
954  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
955    {
956      switch (fmt[i])
957	{
958	case 'E':
959	  /* Two vectors must have the same length.  */
960	  if (XVECLEN (x, i) != XVECLEN (y, i))
961	    return;
962
963	  for (j = 0; j < XVECLEN (x, i); j++)
964	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
965
966	  break;
967
968	case 'e':
969	  merge_memattrs (XEXP (x, i), XEXP (y, i));
970	}
971    }
972  return;
973}
974
975
976/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */
977
978static bool
979insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
980{
981  rtx p1, p2;
982
983  /* Verify that I1 and I2 are equivalent.  */
984  if (GET_CODE (i1) != GET_CODE (i2))
985    return false;
986
987  p1 = PATTERN (i1);
988  p2 = PATTERN (i2);
989
990  if (GET_CODE (p1) != GET_CODE (p2))
991    return false;
992
993  /* If this is a CALL_INSN, compare register usage information.
994     If we don't check this on stack register machines, the two
995     CALL_INSNs might be merged leaving reg-stack.c with mismatching
996     numbers of stack registers in the same basic block.
997     If we don't check this on machines with delay slots, a delay slot may
998     be filled that clobbers a parameter expected by the subroutine.
999
1000     ??? We take the simple route for now and assume that if they're
1001     equal, they were constructed identically.  */
1002
1003  if (CALL_P (i1)
1004      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1005		        CALL_INSN_FUNCTION_USAGE (i2))
1006	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1007    return false;
1008
1009#ifdef STACK_REGS
1010  /* If cross_jump_death_matters is not 0, the insn's mode
1011     indicates whether or not the insn contains any stack-like
1012     regs.  */
1013
1014  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1015    {
1016      /* If register stack conversion has already been done, then
1017         death notes must also be compared before it is certain that
1018         the two instruction streams match.  */
1019
1020      rtx note;
1021      HARD_REG_SET i1_regset, i2_regset;
1022
1023      CLEAR_HARD_REG_SET (i1_regset);
1024      CLEAR_HARD_REG_SET (i2_regset);
1025
1026      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1027	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1028	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1029
1030      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1031	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1032	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1033
1034      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1035
1036      return false;
1037
1038    done:
1039      ;
1040    }
1041#endif
1042
1043  if (reload_completed
1044      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1045    return true;
1046
1047  /* Do not do EQUIV substitution after reload.  First, we're undoing the
1048     work of reload_cse.  Second, we may be undoing the work of the post-
1049     reload splitting pass.  */
1050  /* ??? Possibly add a new phase switch variable that can be used by
1051     targets to disallow the troublesome insns after splitting.  */
1052  if (!reload_completed)
1053    {
1054      /* The following code helps take care of G++ cleanups.  */
1055      rtx equiv1 = find_reg_equal_equiv_note (i1);
1056      rtx equiv2 = find_reg_equal_equiv_note (i2);
1057
1058      if (equiv1 && equiv2
1059	  /* If the equivalences are not to a constant, they may
1060	     reference pseudos that no longer exist, so we can't
1061	     use them.  */
1062	  && (! reload_completed
1063	      || (CONSTANT_P (XEXP (equiv1, 0))
1064		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1065	{
1066	  rtx s1 = single_set (i1);
1067	  rtx s2 = single_set (i2);
1068	  if (s1 != 0 && s2 != 0
1069	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1070	    {
1071	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1072	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1073	      if (! rtx_renumbered_equal_p (p1, p2))
1074		cancel_changes (0);
1075	      else if (apply_change_group ())
1076		return true;
1077	    }
1078	}
1079    }
1080
1081  return false;
1082}
1083
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
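
/* For example (with purely illustrative insns), if BB1 ends in
   "a = b; c = d; goto L" and BB2 ends in "x = y; c = d; goto L", the
   simple jumps are skipped first and the longest equivalent tail found is
   the single insn "c = d", so *F1 and *F2 end up pointing at the two
   copies of it.  */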
1090
1091static int
1092flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1093		      basic_block bb2, rtx *f1, rtx *f2)
1094{
1095  rtx i1, i2, last1, last2, afterlast1, afterlast2;
1096  int ninsns = 0;
1097
1098  /* Skip simple jumps at the end of the blocks.  Complex jumps still
1099     need to be compared for equivalence, which we'll do below.  */
1100
1101  i1 = BB_END (bb1);
1102  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1103  if (onlyjump_p (i1)
1104      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1105    {
1106      last1 = i1;
1107      i1 = PREV_INSN (i1);
1108    }
1109
1110  i2 = BB_END (bb2);
1111  if (onlyjump_p (i2)
1112      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1113    {
1114      last2 = i2;
1115      /* Count everything except for unconditional jump as insn.  */
1116      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1117	ninsns++;
1118      i2 = PREV_INSN (i2);
1119    }
1120
1121  while (true)
1122    {
1123      /* Ignore notes.  */
1124      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1125	i1 = PREV_INSN (i1);
1126
1127      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1128	i2 = PREV_INSN (i2);
1129
1130      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1131	break;
1132
1133      if (!insns_match_p (mode, i1, i2))
1134	break;
1135
1136      merge_memattrs (i1, i2);
1137
1138      /* Don't begin a cross-jump with a NOTE insn.  */
1139      if (INSN_P (i1))
1140	{
1141	  /* If the merged insns have different REG_EQUAL notes, then
1142	     remove them.  */
1143	  rtx equiv1 = find_reg_equal_equiv_note (i1);
1144	  rtx equiv2 = find_reg_equal_equiv_note (i2);
1145
1146	  if (equiv1 && !equiv2)
1147	    remove_note (i1, equiv1);
1148	  else if (!equiv1 && equiv2)
1149	    remove_note (i2, equiv2);
1150	  else if (equiv1 && equiv2
1151		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1152	    {
1153	      remove_note (i1, equiv1);
1154	      remove_note (i2, equiv2);
1155	    }
1156
1157	  afterlast1 = last1, afterlast2 = last2;
1158	  last1 = i1, last2 = i2;
1159	  ninsns++;
1160	}
1161
1162      i1 = PREV_INSN (i1);
1163      i2 = PREV_INSN (i2);
1164    }
1165
1166#ifdef HAVE_cc0
1167  /* Don't allow the insn after a compare to be shared by
1168     cross-jumping unless the compare is also shared.  */
1169  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1170    last1 = afterlast1, last2 = afterlast2, ninsns--;
1171#endif
1172
1173  /* Include preceding notes and labels in the cross-jump.  One,
1174     this may bring us to the head of the blocks as requested above.
1175     Two, it keeps line number notes as matched as may be.  */
1176  if (ninsns)
1177    {
1178      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1179	last1 = PREV_INSN (last1);
1180
1181      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1182	last1 = PREV_INSN (last1);
1183
1184      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1185	last2 = PREV_INSN (last2);
1186
1187      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1188	last2 = PREV_INSN (last2);
1189
1190      *f1 = last1;
1191      *f2 = last2;
1192    }
1193
1194  return ninsns;
1195}
1196
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
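
/* As a hypothetical illustration of the "crossed edges" case below: a block
   ending in "if (a == 0) goto X" that falls through to Y can match a block
   ending in "if (a != 0) goto Y" that falls through to X.  The branch and
   fallthru destinations are swapped, so the second condition is reversed
   before the comparison codes and operands are compared.  */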
1202
1203static bool
1204outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1205{
1206  int nehedges1 = 0, nehedges2 = 0;
1207  edge fallthru1 = 0, fallthru2 = 0;
1208  edge e1, e2;
1209  edge_iterator ei;
1210
1211  /* If BB1 has only one successor, we may be looking at either an
1212     unconditional jump, or a fake edge to exit.  */
1213  if (single_succ_p (bb1)
1214      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1215      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1216    return (single_succ_p (bb2)
1217	    && (single_succ_edge (bb2)->flags
1218		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
1219	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1220
1221  /* Match conditional jumps - this may get tricky when fallthru and branch
1222     edges are crossed.  */
1223  if (EDGE_COUNT (bb1->succs) == 2
1224      && any_condjump_p (BB_END (bb1))
1225      && onlyjump_p (BB_END (bb1)))
1226    {
1227      edge b1, f1, b2, f2;
1228      bool reverse, match;
1229      rtx set1, set2, cond1, cond2;
1230      enum rtx_code code1, code2;
1231
1232      if (EDGE_COUNT (bb2->succs) != 2
1233	  || !any_condjump_p (BB_END (bb2))
1234	  || !onlyjump_p (BB_END (bb2)))
1235	return false;
1236
1237      b1 = BRANCH_EDGE (bb1);
1238      b2 = BRANCH_EDGE (bb2);
1239      f1 = FALLTHRU_EDGE (bb1);
1240      f2 = FALLTHRU_EDGE (bb2);
1241
1242      /* Get around possible forwarders on fallthru edges.  Other cases
1243         should be optimized out already.  */
1244      if (FORWARDER_BLOCK_P (f1->dest))
1245	f1 = single_succ_edge (f1->dest);
1246
1247      if (FORWARDER_BLOCK_P (f2->dest))
1248	f2 = single_succ_edge (f2->dest);
1249
1250      /* To simplify use of this function, return false if there are
1251	 unneeded forwarder blocks.  These will get eliminated later
1252	 during cleanup_cfg.  */
1253      if (FORWARDER_BLOCK_P (f1->dest)
1254	  || FORWARDER_BLOCK_P (f2->dest)
1255	  || FORWARDER_BLOCK_P (b1->dest)
1256	  || FORWARDER_BLOCK_P (b2->dest))
1257	return false;
1258
1259      if (f1->dest == f2->dest && b1->dest == b2->dest)
1260	reverse = false;
1261      else if (f1->dest == b2->dest && b1->dest == f2->dest)
1262	reverse = true;
1263      else
1264	return false;
1265
1266      set1 = pc_set (BB_END (bb1));
1267      set2 = pc_set (BB_END (bb2));
1268      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1269	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1270	reverse = !reverse;
1271
1272      cond1 = XEXP (SET_SRC (set1), 0);
1273      cond2 = XEXP (SET_SRC (set2), 0);
1274      code1 = GET_CODE (cond1);
1275      if (reverse)
1276	code2 = reversed_comparison_code (cond2, BB_END (bb2));
1277      else
1278	code2 = GET_CODE (cond2);
1279
1280      if (code2 == UNKNOWN)
1281	return false;
1282
1283      /* Verify codes and operands match.  */
1284      match = ((code1 == code2
1285		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1286		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1287	       || (code1 == swap_condition (code2)
1288		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
1289					      XEXP (cond2, 0))
1290		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
1291					      XEXP (cond2, 1))));
1292
1293      /* If we return true, we will join the blocks.  Which means that
1294	 we will only have one branch prediction bit to work with.  Thus
1295	 we require the existing branches to have probabilities that are
1296	 roughly similar.  */
1297      if (match
1298	  && !optimize_size
1299	  && maybe_hot_bb_p (bb1)
1300	  && maybe_hot_bb_p (bb2))
1301	{
1302	  int prob2;
1303
1304	  if (b1->dest == b2->dest)
1305	    prob2 = b2->probability;
1306	  else
1307	    /* Do not use f2 probability as f2 may be forwarded.  */
1308	    prob2 = REG_BR_PROB_BASE - b2->probability;
1309
1310	  /* Fail if the difference in probabilities is greater than 50%.
1311	     This rules out two well-predicted branches with opposite
1312	     outcomes.  */
1313	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1314	    {
1315	      if (dump_file)
1316		fprintf (dump_file,
1317			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1318			 bb1->index, bb2->index, b1->probability, prob2);
1319
1320	      return false;
1321	    }
1322	}
1323
1324      if (dump_file && match)
1325	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1326		 bb1->index, bb2->index);
1327
1328      return match;
1329    }
1330
1331  /* Generic case - we are seeing a computed jump, table jump or trapping
1332     instruction.  */
1333
1334  /* Check whether there are tablejumps in the end of BB1 and BB2.
1335     Return true if they are identical.  */
1336    {
1337      rtx label1, label2;
1338      rtx table1, table2;
1339
1340      if (tablejump_p (BB_END (bb1), &label1, &table1)
1341	  && tablejump_p (BB_END (bb2), &label2, &table2)
1342	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1343	{
	  /* The labels should never be the same rtx.  If they really are the
	     same, the jump tables are the same too.  So disable crossjumping
	     of blocks BB1 and BB2 because when deleting the common insns at
	     the end of BB1 by delete_basic_block () the jump table would be
	     deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1351	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1352	    {
1353	      /* Set IDENTICAL to true when the tables are identical.  */
1354	      bool identical = false;
1355	      rtx p1, p2;
1356
1357	      p1 = PATTERN (table1);
1358	      p2 = PATTERN (table2);
1359	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1360		{
1361		  identical = true;
1362		}
1363	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
1364		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1365		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1366		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1367		{
1368		  int i;
1369
1370		  identical = true;
1371		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1372		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1373		      identical = false;
1374		}
1375
1376	      if (identical)
1377		{
1378		  replace_label_data rr;
1379		  bool match;
1380
1381		  /* Temporarily replace references to LABEL1 with LABEL2
1382		     in BB1->END so that we could compare the instructions.  */
1383		  rr.r1 = label1;
1384		  rr.r2 = label2;
1385		  rr.update_label_nuses = false;
1386		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1387
1388		  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1389		  if (dump_file && match)
1390		    fprintf (dump_file,
1391			     "Tablejumps in bb %i and %i match.\n",
1392			     bb1->index, bb2->index);
1393
1394		  /* Set the original label in BB1->END because when deleting
1395		     a block whose end is a tablejump, the tablejump referenced
1396		     from the instruction is deleted too.  */
1397		  rr.r1 = label2;
1398		  rr.r2 = label1;
1399		  for_each_rtx (&BB_END (bb1), replace_label, &rr);
1400
1401		  return match;
1402		}
1403	    }
1404	  return false;
1405	}
1406    }
1407
1408  /* First ensure that the instructions match.  There may be many outgoing
1409     edges so this test is generally cheaper.  */
1410  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1411    return false;
1412
  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
1416  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1417    return false;
1418
1419  FOR_EACH_EDGE (e1, ei, bb1->succs)
1420    {
1421      e2 = EDGE_SUCC (bb2, ei.index);
1422
1423      if (e1->flags & EDGE_EH)
1424	nehedges1++;
1425
1426      if (e2->flags & EDGE_EH)
1427	nehedges2++;
1428
1429      if (e1->flags & EDGE_FALLTHRU)
1430	fallthru1 = e1;
1431      if (e2->flags & EDGE_FALLTHRU)
1432	fallthru2 = e2;
1433    }
1434
1435  /* If number of edges of various types does not match, fail.  */
1436  if (nehedges1 != nehedges2
1437      || (fallthru1 != 0) != (fallthru2 != 0))
1438    return false;
1439
1440  /* fallthru edges must be forwarded to the same destination.  */
1441  if (fallthru1)
1442    {
1443      basic_block d1 = (forwarder_block_p (fallthru1->dest)
1444			? single_succ (fallthru1->dest): fallthru1->dest);
1445      basic_block d2 = (forwarder_block_p (fallthru2->dest)
1446			? single_succ (fallthru2->dest): fallthru2->dest);
1447
1448      if (d1 != d2)
1449	return false;
1450    }
1451
1452  /* Ensure the same EH region.  */
1453  {
1454    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1455    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1456
1457    if (!n1 && n2)
1458      return false;
1459
1460    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1461      return false;
1462  }
1463
  /* We don't need to match the rest of the edges as the above checks should
     be enough to ensure that they are equivalent.  */
1466  return true;
1467}
1468
1469/* E1 and E2 are edges with the same destination block.  Search their
1470   predecessors for common code.  If found, redirect control flow from
1471   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
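
/* A minimal, hypothetical sketch of the resulting transformation: if blocks
   A and B both end with the common sequence "x = y + 1; goto C", the copy
   in A is deleted, B is split (if necessary) right before its copy, and A
   is redirected to that split point, so the common sequence is emitted only
   once.  All names here are illustrative only.  */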
1472
1473static bool
1474try_crossjump_to_edge (int mode, edge e1, edge e2)
1475{
1476  int nmatch;
1477  basic_block src1 = e1->src, src2 = e2->src;
1478  basic_block redirect_to, redirect_from, to_remove;
1479  rtx newpos1, newpos2;
1480  edge s;
1481  edge_iterator ei;
1482
1483  newpos1 = newpos2 = NULL_RTX;
1484
1485  /* If we have partitioned hot/cold basic blocks, it is a bad idea
1486     to try this optimization.
1487
1488     Basic block partitioning may result in some jumps that appear to
1489     be optimizable (or blocks that appear to be mergeable), but which really
1490     must be left untouched (they are required to make it safely across
1491     partition boundaries).  See the comments at the top of
1492     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1493
1494  if (flag_reorder_blocks_and_partition && no_new_pseudos)
1495    return false;
1496
1497  /* Search backward through forwarder blocks.  We don't need to worry
1498     about multiple entry or chained forwarders, as they will be optimized
1499     away.  We do this to look past the unconditional jump following a
1500     conditional jump that is required due to the current CFG shape.  */
1501  if (single_pred_p (src1)
1502      && FORWARDER_BLOCK_P (src1))
1503    e1 = single_pred_edge (src1), src1 = e1->src;
1504
1505  if (single_pred_p (src2)
1506      && FORWARDER_BLOCK_P (src2))
1507    e2 = single_pred_edge (src2), src2 = e2->src;
1508
1509  /* Nothing to do if we reach ENTRY, or a common source block.  */
1510  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1511    return false;
1512  if (src1 == src2)
1513    return false;
1514
  /* Seeing more than one forwarder block would confuse us later...  */
1516  if (FORWARDER_BLOCK_P (e1->dest)
1517      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1518    return false;
1519
1520  if (FORWARDER_BLOCK_P (e2->dest)
1521      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1522    return false;
1523
1524  /* Likewise with dead code (possibly newly created by the other optimizations
1525     of cfg_cleanup).  */
1526  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1527    return false;
1528
1529  /* Look for the common insn sequence, part the first ...  */
1530  if (!outgoing_edges_match (mode, src1, src2))
1531    return false;
1532
1533  /* ... and part the second.  */
1534  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1535
1536  /* Don't proceed with the crossjump unless we found a sufficient number
1537     of matching instructions or the 'from' block was totally matched
1538     (such that its predecessors will hopefully be redirected and the
1539     block removed).  */
1540  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1541      && (newpos1 != BB_HEAD (src1)))
1542    return false;
1543
  /* Here we know that the insns at the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2
     they have already been compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
1549    {
1550      rtx label1, label2;
1551      rtx table1, table2;
1552
1553      if (tablejump_p (BB_END (src1), &label1, &table1)
1554	  && tablejump_p (BB_END (src2), &label2, &table2)
1555	  && label1 != label2)
1556	{
1557	  replace_label_data rr;
1558	  rtx insn;
1559
1560	  /* Replace references to LABEL1 with LABEL2.  */
1561	  rr.r1 = label1;
1562	  rr.r2 = label2;
1563	  rr.update_label_nuses = true;
1564	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1565	    {
1566	      /* Do not replace the label in SRC1->END because when deleting
1567		 a block whose end is a tablejump, the tablejump referenced
1568		 from the instruction is deleted too.  */
1569	      if (insn != BB_END (src1))
1570		for_each_rtx (&insn, replace_label, &rr);
1571	    }
1572	}
1573    }
1574
1575  /* Avoid splitting if possible.  */
1576  if (newpos2 == BB_HEAD (src2))
1577    redirect_to = src2;
1578  else
1579    {
1580      if (dump_file)
1581	fprintf (dump_file, "Splitting bb %i before %i insns\n",
1582		 src2->index, nmatch);
1583      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1584    }
1585
1586  if (dump_file)
1587    fprintf (dump_file,
1588	     "Cross jumping from bb %i to bb %i; %i common insns\n",
1589	     src1->index, src2->index, nmatch);
1590
1591  redirect_to->count += src1->count;
1592  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
1594  redirect_to->flags |= BB_DIRTY;
1595
1596  /* Recompute the frequencies and counts of outgoing edges.  */
1597  FOR_EACH_EDGE (s, ei, redirect_to->succs)
1598    {
1599      edge s2;
1600      edge_iterator ei;
1601      basic_block d = s->dest;
1602
1603      if (FORWARDER_BLOCK_P (d))
1604	d = single_succ (d);
1605
1606      FOR_EACH_EDGE (s2, ei, src1->succs)
1607	{
1608	  basic_block d2 = s2->dest;
1609	  if (FORWARDER_BLOCK_P (d2))
1610	    d2 = single_succ (d2);
1611	  if (d == d2)
1612	    break;
1613	}
1614
1615      s->count += s2->count;
1616
      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
1620      if (FORWARDER_BLOCK_P (s->dest))
1621	{
1622	  single_succ_edge (s->dest)->count += s2->count;
1623	  s->dest->count += s2->count;
1624	  s->dest->frequency += EDGE_FREQUENCY (s);
1625	}
1626
1627      if (FORWARDER_BLOCK_P (s2->dest))
1628	{
1629	  single_succ_edge (s2->dest)->count -= s2->count;
1630	  if (single_succ_edge (s2->dest)->count < 0)
1631	    single_succ_edge (s2->dest)->count = 0;
1632	  s2->dest->count -= s2->count;
1633	  s2->dest->frequency -= EDGE_FREQUENCY (s);
1634	  if (s2->dest->frequency < 0)
1635	    s2->dest->frequency = 0;
1636	  if (s2->dest->count < 0)
1637	    s2->dest->count = 0;
1638	}
1639
1640      if (!redirect_to->frequency && !src1->frequency)
1641	s->probability = (s->probability + s2->probability) / 2;
1642      else
1643	s->probability
1644	  = ((s->probability * redirect_to->frequency +
1645	      s2->probability * src1->frequency)
1646	     / (redirect_to->frequency + src1->frequency));
1647    }
1648
1649  update_br_prob_note (redirect_to);
1650
1651  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */
1652
1653  /* Skip possible basic block header.  */
1654  if (LABEL_P (newpos1))
1655    newpos1 = NEXT_INSN (newpos1);
1656
1657  if (NOTE_P (newpos1))
1658    newpos1 = NEXT_INSN (newpos1);
1659
1660  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1661  to_remove = single_succ (redirect_from);
1662
1663  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1664  delete_basic_block (to_remove);
1665
1666  update_forwarder_flag (redirect_from);
1667  if (redirect_to != src2)
1668    update_forwarder_flag (src2);
1669
1670  return true;
1671}
1672
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
1676
1677static bool
1678try_crossjump_bb (int mode, basic_block bb)
1679{
1680  edge e, e2, fallthru;
1681  bool changed;
1682  unsigned max, ix, ix2;
1683  basic_block ev, ev2;
1684  edge_iterator ei;
1685
  /* Nothing to do if there are not at least two incoming edges.  */
1687  if (EDGE_COUNT (bb->preds) < 2)
1688    return false;
1689
1690  /* Don't crossjump if this block ends in a computed jump,
1691     unless we are optimizing for size.  */
1692  if (!optimize_size
1693      && bb != EXIT_BLOCK_PTR
1694      && computed_jump_p (BB_END (bb)))
1695    return false;
1696
1697  /* If we are partitioning hot/cold basic blocks, we don't want to
1698     mess up unconditional or indirect jumps that cross between hot
1699     and cold sections.
1700
1701     Basic block partitioning may result in some jumps that appear to
1702     be optimizable (or blocks that appear to be mergeable), but which really
1703     must be left untouched (they are required to make it safely across
1704     partition boundaries).  See the comments at the top of
1705     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
1706
1707  if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
1708      != BB_PARTITION (EDGE_PRED (bb, 1)->src)
1709      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1710    return false;
1711
1712  /* It is always cheapest to redirect a block that ends in a branch to
1713     a block that falls through into BB, as that adds no branches to the
1714     program.  We'll try that combination first.  */
1715  fallthru = NULL;
1716  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1717
1718  if (EDGE_COUNT (bb->preds) > max)
1719    return false;
1720
1721  FOR_EACH_EDGE (e, ei, bb->preds)
1722    {
1723      if (e->flags & EDGE_FALLTHRU)
1724        fallthru = e;
1725    }
1726
1727  changed = false;
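      /* Walk the predecessor edges of BB, pairing each one with another
         predecessor and looking for a shared tail of instructions.  */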
1728  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1729    {
1730      e = EDGE_PRED (ev, ix);
1731      ix++;
1732
1733      /* As noted above, first try with the fallthru predecessor.  */
1734      if (fallthru)
1735	{
1736	  /* Don't combine the fallthru edge into anything else.
1737	     If there is a match, we'll do it the other way around.  */
1738	  if (e == fallthru)
1739	    continue;
1740	  /* If nothing changed since the last attempt, there is nothing
1741	     we can do.  */
1742	  if (!first_pass
1743	      && (!(e->src->flags & BB_DIRTY)
1744		  && !(fallthru->src->flags & BB_DIRTY)))
1745	    continue;
1746
1747	  if (try_crossjump_to_edge (mode, e, fallthru))
1748	    {
1749	      changed = true;
1750	      ix = 0;
1751	      ev = bb;
1752	      continue;
1753	    }
1754	}
1755
1756      /* Non-obvious work-limiting check: Recognize that we're going
1757	 to call try_crossjump_bb on every basic block.  So if we have
1758	 two blocks with lots of outgoing edges (a switch) and they
1759	 share lots of common destinations, then we would do the
1760	 cross-jump check once for each common destination.
1761
1762	 Now, if the blocks actually are cross-jump candidates, then
1763	 all of their destinations will be shared.  This means that
1764	 we only need to check them for cross-jump candidacy once.  We
1765	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1766	 choosing to do the check only when the edge in question is the
1767	 first successor edge of its source block A.  */
1768      if (EDGE_SUCC (e->src, 0) != e)
1769	continue;
1770
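          /* Now try to pair E with each of the remaining predecessor
             edges of BB.  */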
1771      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1772	{
1773	  e2 = EDGE_PRED (ev2, ix2);
1774	  ix2++;
1775
1776	  if (e2 == e)
1777	    continue;
1778
1779	  /* We've already checked the fallthru edge above.  */
1780	  if (e2 == fallthru)
1781	    continue;
1782
1783	  /* The "first successor" check above only prevents multiple
1784	     checks of crossjump(A,B).  In order to prevent redundant
1785	     checks of crossjump(B,A), require that A be the block
1786	     with the lowest index.  */
1787	  if (e->src->index > e2->src->index)
1788	    continue;
1789
1790	  /* If nothing changed since the last attempt, there is nothing
1791	     we can do.  */
1792	  if (!first_pass
1793	      && (!(e->src->flags & BB_DIRTY)
1794		  && !(e2->src->flags & BB_DIRTY)))
1795	    continue;
1796
1797	  if (try_crossjump_to_edge (mode, e, e2))
1798	    {
1799	      changed = true;
1800	      ev2 = bb;
1801	      ix = 0;
1802	      break;
1803	    }
1804	}
1805    }
1806
1807  return changed;
1808}
1809
1810/* Do simple CFG optimizations - basic block merging, simplification of
1811   jump instructions, etc.  Return true if changes were made.  */
1812
1813static bool
1814try_optimize_cfg (int mode)
1815{
1816  bool changed_overall = false;
1817  bool changed;
1818  int iterations = 0;
1819  basic_block bb, b, next;
1820
1821  if (mode & CLEANUP_CROSSJUMP)
1822    add_noreturn_fake_exit_edges ();
1823
1824  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1825    clear_bb_flags ();
1826
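      /* Compute the forwarder flag for every block up front; the
         transformations below keep it up to date with update_forwarder_flag.  */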
1827  FOR_EACH_BB (bb)
1828    update_forwarder_flag (bb);
1829
1830  if (! targetm.cannot_modify_jumps_p ())
1831    {
1832      first_pass = true;
1833      /* Attempt to merge blocks as made possible by edge removal.  If
1834	 a block has only one successor, and the successor has only
1835	 one predecessor, they may be combined.  */
1836      do
1837	{
1838	  changed = false;
1839	  iterations++;
1840
1841	  if (dump_file)
1842	    fprintf (dump_file,
1843		     "\n\ntry_optimize_cfg iteration %i\n\n",
1844		     iterations);
1845
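          /* Walk the basic blocks, applying the simplifications below to
             each one in turn.  */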
1846	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1847	    {
1848	      basic_block c;
1849	      edge s;
1850	      bool changed_here = false;
1851
1852	      /* Delete trivially dead basic blocks.  */
1853	      while (EDGE_COUNT (b->preds) == 0)
1854		{
1855		  c = b->prev_bb;
1856		  if (dump_file)
1857		    fprintf (dump_file, "Deleting block %i.\n",
1858			     b->index);
1859
1860		  delete_basic_block (b);
1861		  if (!(mode & CLEANUP_CFGLAYOUT))
1862		    changed = true;
1863		  b = c;
1864		}
1865
1866	      /* Remove code labels no longer used.  */
1867	      if (single_pred_p (b)
1868		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1869		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1870		  && LABEL_P (BB_HEAD (b))
1871		  /* If the previous block ends with a branch to this
1872		     block, we can't delete the label.  Normally this
1873		     is a condjump that is yet to be simplified, but
1874		     if CASE_DROPS_THRU, this can be a tablejump with
1875		     some element going to the same place as the
1876		     default (fallthru).  */
1877		  && (single_pred (b) == ENTRY_BLOCK_PTR
1878		      || !JUMP_P (BB_END (single_pred (b)))
1879		      || ! label_is_jump_target_p (BB_HEAD (b),
1880						   BB_END (single_pred (b)))))
1881		{
1882		  rtx label = BB_HEAD (b);
1883
1884		  delete_insn_chain (label, label);
1885		  /* In case the label is undeletable, move it after the
1886		     BASIC_BLOCK note.  */
1887		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1888		    {
1889		      rtx bb_note = NEXT_INSN (BB_HEAD (b));
1890
1891		      reorder_insns_nobb (label, label, bb_note);
1892		      BB_HEAD (b) = bb_note;
1893		    }
1894		  if (dump_file)
1895		    fprintf (dump_file, "Deleted label in block %i.\n",
1896			     b->index);
1897		}
1898
1899	      /* If we fall through an empty block, we can remove it.  */
1900	      if (!(mode & CLEANUP_CFGLAYOUT)
1901		  && single_pred_p (b)
1902		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1903		  && !LABEL_P (BB_HEAD (b))
1904		  && FORWARDER_BLOCK_P (b)
1905		  /* Note that forwarder_block_p being true ensures that
1906		     this block has a successor.  */
1907		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
1908		  && n_basic_blocks > 1)
1909		{
1910		  if (dump_file)
1911		    fprintf (dump_file,
1912			     "Deleting fallthru block %i.\n",
1913			     b->index);
1914
1915		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1916		  redirect_edge_succ_nodup (single_pred_edge (b),
1917					    single_succ (b));
1918		  delete_basic_block (b);
1919		  changed = true;
1920		  b = c;
1921		}
1922
1923	      if (single_succ_p (b)
1924		  && (s = single_succ_edge (b))
1925		  && !(s->flags & EDGE_COMPLEX)
1926		  && (c = s->dest) != EXIT_BLOCK_PTR
1927		  && single_pred_p (c)
1928		  && b != c)
1929		{
1930		  /* When not in cfg_layout mode, use code that is aware of
1931		     INSN reordering.  This code may create new basic blocks,
1932		     so it does not fit the merge_blocks interface; it is kept
1933		     here in the hope that it will become useless once more of
1934		     the compiler is converted to cfg_layout mode.  */
1935
1936		  if ((mode & CLEANUP_CFGLAYOUT)
1937		      && can_merge_blocks_p (b, c))
1938		    {
1939		      merge_blocks (b, c);
1940		      update_forwarder_flag (b);
1941		      changed_here = true;
1942		    }
1943		  else if (!(mode & CLEANUP_CFGLAYOUT)
1944			   /* If the jump insn has side effects,
1945			      we can't kill the edge.  */
1946			   && (!JUMP_P (BB_END (b))
1947			       || (reload_completed
1948				   ? simplejump_p (BB_END (b))
1949				   : (onlyjump_p (BB_END (b))
1950				      && !tablejump_p (BB_END (b),
1951						       NULL, NULL))))
1952			   && (next = merge_blocks_move (s, b, c, mode)))
1953		    {
1954		      b = next;
1955		      changed_here = true;
1956		    }
1957		}
1958
1959	      /* Simplify branch over branch.  */
1960	      if ((mode & CLEANUP_EXPENSIVE)
1961		   && !(mode & CLEANUP_CFGLAYOUT)
1962		   && try_simplify_condjump (b))
1963		changed_here = true;
1964
1965	      /* If B has a single outgoing edge, but uses a
1966		 non-trivial jump instruction without side-effects, we
1967		 can either delete the jump entirely, or replace it
1968		 with a simple unconditional jump.  */
1969	      if (single_succ_p (b)
1970		  && single_succ (b) != EXIT_BLOCK_PTR
1971		  && onlyjump_p (BB_END (b))
1972		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1973		  && try_redirect_by_replacing_jump (single_succ_edge (b),
1974						     single_succ (b),
1975						     (mode & CLEANUP_CFGLAYOUT) != 0))
1976		{
1977		  update_forwarder_flag (b);
1978		  changed_here = true;
1979		}
1980
1981	      /* Simplify branch to branch.  */
1982	      if (try_forward_edges (mode, b))
1983		changed_here = true;
1984
1985	      /* Look for shared code between blocks.  */
1986	      if ((mode & CLEANUP_CROSSJUMP)
1987		  && try_crossjump_bb (mode, b))
1988		changed_here = true;
1989
1990	      /* Don't get confused by the index shift caused by
1991		 deleting blocks.  */
1992	      if (!changed_here)
1993		b = b->next_bb;
1994	      else
1995		changed = true;
1996	    }
1997
1998	  if ((mode & CLEANUP_CROSSJUMP)
1999	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2000	    changed = true;
2001
2002#ifdef ENABLE_CHECKING
2003	  if (changed)
2004	    verify_flow_info ();
2005#endif
2006
2007	  changed_overall |= changed;
2008	  first_pass = false;
2009	}
2010      while (changed);
2011    }
2012
2013  if (mode & CLEANUP_CROSSJUMP)
2014    remove_fake_exit_edges ();
2015
2016  FOR_ALL_BB (b)
2017    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2018
2019  return changed_overall;
2020}
2021
2022/* Delete all unreachable basic blocks.  */
2023
2024bool
2025delete_unreachable_blocks (void)
2026{
2027  bool changed = false;
2028  basic_block b, next_bb;
2029
2030  find_unreachable_blocks ();
2031
2032  /* Delete all unreachable basic blocks.  */
2033
2034  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2035    {
2036      next_bb = b->next_bb;
2037
2038      if (!(b->flags & BB_REACHABLE))
2039	{
2040	  delete_basic_block (b);
2041	  changed = true;
2042	}
2043    }
2044
2045  if (changed)
2046    tidy_fallthru_edges ();
2047  return changed;
2048}
2049
2050/* Merge sequential blocks if possible.  */
2051
2052bool
2053merge_seq_blocks (void)
2054{
2055  basic_block bb;
2056  bool changed = false;
2057
2058  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2059    {
2060      if (single_succ_p (bb)
2061	  && can_merge_blocks_p (bb, single_succ (bb)))
2062	{
2063	  /* Merge the blocks and retry.  */
2064	  merge_blocks (bb, single_succ (bb));
2065	  changed = true;
2066	  continue;
2067	}
2068
2069      bb = bb->next_bb;
2070    }
2071
2072  return changed;
2073}
2074
2075/* Tidy the CFG by deleting unreachable code and whatnot.  */
2076
2077bool
2078cleanup_cfg (int mode)
2079{
2080  bool changed = false;
2081
2082  timevar_push (TV_CLEANUP_CFG);
2083  if (delete_unreachable_blocks ())
2084    {
2085      changed = true;
2086      /* We've possibly created trivially dead code.  Clean it up right
2087	 now to expose more opportunities for try_optimize_cfg.  */
2088      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2089	  && !reload_completed)
2090	delete_trivially_dead_insns (get_insns(), max_reg_num ());
2091    }
2092
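      /* Compact the basic block array so the block indices are contiguous
         again after the deletions above.  */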
2093  compact_blocks ();
2094
2095  while (try_optimize_cfg (mode))
2096    {
2097      delete_unreachable_blocks (), changed = true;
2098      if (mode & CLEANUP_UPDATE_LIFE)
2099	{
2100	  /* Cleaning up the CFG introduces more opportunities for dead code
2101	     removal that in turn may introduce more opportunities for
2102	     cleaning up the CFG.  */
2103	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2104						 PROP_DEATH_NOTES
2105						 | PROP_SCAN_DEAD_CODE
2106						 | PROP_KILL_DEAD_CODE
2107						 | ((mode & CLEANUP_LOG_LINKS)
2108						    ? PROP_LOG_LINKS : 0)))
2109	    break;
2110	}
2111      else if (!(mode & CLEANUP_NO_INSN_DEL)
2112	       && (mode & CLEANUP_EXPENSIVE)
2113	       && !reload_completed)
2114	{
2115	  if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2116	    break;
2117	}
2118      else
2119	break;
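          /* The cleanups above may have removed every user of some jump
             tables; delete any tables that are now dead.  */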
2120      delete_dead_jumptables ();
2121    }
2122
2123  timevar_pop (TV_CLEANUP_CFG);
2124
2125  return changed;
2126}
2127
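    /* Run the first jump cleanup pass: remove unreachable basic blocks
       and, if the function emitted tail calls, fix them up.  */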
2128static void
2129rest_of_handle_jump (void)
2130{
2131  delete_unreachable_blocks ();
2132
2133  if (cfun->tail_call_emit)
2134    fixup_tail_calls ();
2135}
2136
2137struct tree_opt_pass pass_jump =
2138{
2139  "sibling",                            /* name */
2140  NULL,                                 /* gate */
2141  rest_of_handle_jump,			/* execute */
2142  NULL,                                 /* sub */
2143  NULL,                                 /* next */
2144  0,                                    /* static_pass_number */
2145  TV_JUMP,                              /* tv_id */
2146  0,                                    /* properties_required */
2147  0,                                    /* properties_provided */
2148  0,                                    /* properties_destroyed */
2149  TODO_ggc_collect,                     /* todo_flags_start */
2150  TODO_dump_func |
2151  TODO_verify_flow,                     /* todo_flags_finish */
2152  'i'                                   /* letter */
2153};
2154
2155
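    /* Run the second jump optimization pass: turn expected-value notes into
       branch probabilities, delete trivially dead insns, and clean up the
       CFG.  */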
2156static void
2157rest_of_handle_jump2 (void)
2158{
2159  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB.  Do this
2160     before jump optimization switches branch directions.  */
2161  if (flag_guess_branch_prob)
2162    expected_value_to_br_prob ();
2163
2164  delete_trivially_dead_insns (get_insns (), max_reg_num ());
2165  reg_scan (get_insns (), max_reg_num ());
2166  if (dump_file)
2167    dump_flow_info (dump_file);
2168  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
2169               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2170
2171  create_loop_notes ();
2172
2173  purge_line_number_notes ();
2174
2175  if (optimize)
2176    cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
2177
2178  /* Jump optimization, and the removal of NULL pointer checks, may
2179     have reduced the number of instructions substantially.  CSE, and
2180     future passes, allocate arrays whose dimensions involve the
2181     maximum instruction UID, so if we can reduce the maximum UID
2182     we'll save big on memory.  */
2183  renumber_insns (dump_file);
2184}
2185
2186
2187struct tree_opt_pass pass_jump2 =
2188{
2189  "jump",                               /* name */
2190  NULL,                                 /* gate */
2191  rest_of_handle_jump2,			/* execute */
2192  NULL,                                 /* sub */
2193  NULL,                                 /* next */
2194  0,                                    /* static_pass_number */
2195  TV_JUMP,                              /* tv_id */
2196  0,                                    /* properties_required */
2197  0,                                    /* properties_provided */
2198  0,                                    /* properties_destroyed */
2199  TODO_ggc_collect,                     /* todo_flags_start */
2200  TODO_dump_func,                       /* todo_flags_finish */
2201  'j'                                   /* letter */
2202};
2203
2204
2205