/* Instruction scheduling pass.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "profile.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "target.h"


#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about the ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

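  /* Every insn between PREV_HEAD and NEXT_TAIL was handed to try_ready,
     so the count must match the region size computed by set_priorities
     in schedule_ebb.  */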
  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction is
         scheduled after the last control flow instruction.  In this
         case we can create a new basic block.  It is always exactly
         one basic block, the last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

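      /* If a fallthru edge exists, splitting it yields the new block;
         otherwise the jump is followed by a barrier and we create a
         fresh unreachable block after it.  */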
      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region to the end of BB.
         Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't go
         outside the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

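  /* For example, the insn with uid 42 prints as "+   42" at a cycle
     start and as "    42" otherwise.  */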
  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

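  /* Prefer the insn from the hotter block: a negative return value
     ranks INSN1 first, a positive one ranks INSN2 first.  */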
  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

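  /* Registers live at the start of a non-fallthru successor must be
     treated as used by the jump; the fallthru successor is part of the
     ebb, so its uses are visible to normal dependence analysis.  */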
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_move_insn ().  */
  | NEW_BBS
};

/* Returns the earliest block in the EBB currently being processed
   where a "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already-processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

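                  /* Accept insn2 only if its block is not on the chain
                     of already-processed blocks headed by LAST_BLOCK
                     (linked through bb->aux by add_deps_for_risky_insns).  */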
                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
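          /* Chain this block onto the list of blocks already seen
             (linked through bb->aux); earliest_block_with_similar_load
             walks that chain.  */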
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

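          /* By default a risky insn must not be moved above the last
             jump (PREV); a PFREE candidate with a similar load in an
             earlier block may be anchored further back instead.  */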
          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs,
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);
                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* We should fix the rest of the code not to get confused by a note
     or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }


  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

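  /* Detach the basic block notes so that insns can move freely across
     block boundaries during scheduling; haifa-sched.c re-links them as
     the blocks are re-formed.  */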
  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
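
  /* schedule_block is allowed to fail only when we are modulo
     scheduling; an ordinary ebb schedule must always succeed.  */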
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert
     (target_bb == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every ebb in the function.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

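      /* Extend the ebb forward as long as control falls through to a
         block that is not a label target, the fallthru edge is hot
         enough, and the successor is not excluded from scheduling.  */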
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from the current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to the ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore, they
     always form a single-block EBB, and we can use rec->index to
     identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return the next block in the ebb chain.  For parameter meaning please
   refer to sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next nonempty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of a jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */