1/* Instruction scheduling pass.  Selective scheduler and pipeliner.
2   Copyright (C) 2006-2015 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3.  If not see
18<http://www.gnu.org/licenses/>.  */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "tm.h"
24#include "diagnostic-core.h"
25#include "rtl.h"
26#include "tm_p.h"
27#include "hard-reg-set.h"
28#include "regs.h"
29#include "hashtab.h"
30#include "hash-set.h"
31#include "vec.h"
32#include "machmode.h"
33#include "input.h"
34#include "function.h"
35#include "predict.h"
36#include "dominance.h"
37#include "cfg.h"
38#include "cfgrtl.h"
39#include "cfganal.h"
40#include "cfgbuild.h"
41#include "basic-block.h"
42#include "flags.h"
43#include "insn-config.h"
44#include "insn-attr.h"
45#include "except.h"
46#include "recog.h"
47#include "params.h"
48#include "target.h"
49#include "sched-int.h"
50#include "ggc.h"
51#include "symtab.h"
52#include "wide-int.h"
53#include "inchash.h"
54#include "tree.h"
55#include "langhooks.h"
56#include "rtlhooks-def.h"
57#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
58
59#ifdef INSN_SCHEDULING
60#include "sel-sched-ir.h"
/* We only need this header for sel_print_insn.  */
62#include "sel-sched-dump.h"
63
/* A vector holding bb info for the whole scheduling pass.  */
vec<sel_global_bb_info_def>
    sel_global_bb_info = vNULL;

/* A vector holding bb info for the current region.  */
vec<sel_region_bb_info_def>
    sel_region_bb_info = vNULL;
71
72/* A pool for allocating all lists.  */
73alloc_pool sched_lists_pool;
74
75/* This contains information about successors for compute_av_set.  */
76struct succs_info current_succs;
77
78/* Data structure to describe interaction with the generic scheduler utils.  */
79static struct common_sched_info_def sel_common_sched_info;
80
81/* The loop nest being pipelined.  */
82struct loop *current_loop_nest;
83
84/* LOOP_NESTS is a vector containing the corresponding loop nest for
85   each region.  */
86static vec<loop_p> loop_nests = vNULL;
87
88/* Saves blocks already in loop regions, indexed by bb->index.  */
89static sbitmap bbs_in_loop_rgns = NULL;
90
91/* CFG hooks that are saved before changing create_basic_block hook.  */
92static struct cfg_hooks orig_cfg_hooks;
93
94
95/* Array containing reverse topological index of function basic blocks,
96   indexed by BB->INDEX.  */
97static int *rev_top_order_index = NULL;
98
99/* Length of the above array.  */
100static int rev_top_order_index_len = -1;
101
/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* The number of regsets currently on the stack (its stack pointer).  */
  int n;

  /* The allocated size of the stack.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to the pool.  */
  regset *vv;

  /* The stack pointer of VV.  */
  int nn;

  /* The allocated size of VV.  */
  int ss;

  /* The difference between the number of allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
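
/* A minimal usage sketch (illustrative only; USED is a placeholder name):
   every regset taken from the pool must eventually be returned, so that
   DIFF is back to zero by the time free_regset_pool () runs:

     regset used = get_clear_regset_from_pool ();
     ... collect registers into USED ...
     return_regset_to_pool (used);

   VV/NN/SS only exist so that the leak check in free_regset_pool () can
   verify this invariant when checking is enabled.  */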
128
129/* This represents the nop pool.  */
130static struct
131{
132  /* The vector which holds previously emitted nops.  */
133  insn_t *v;
134
  /* The number of nops currently stored (its stack pointer).  */
  int n;

  /* The allocated size of the vector.  */
  int s;
140} nop_pool = { NULL, 0, 0 };
141
142/* The pool for basic block notes.  */
143static vec<rtx_note *> bb_note_pool;
144
145/* A NOP pattern used to emit placeholder insns.  */
146rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
149rtx_insn *exit_insn = NULL;
150
/* True if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
153bool preheader_removed = false;
154
155
156/* Forward static declarations.  */
157static void fence_clear (fence_t);
158
159static void deps_init_id (idata_t, insn_t, bool);
160static void init_id_from_df (idata_t, insn_t, bool);
161static expr_t set_insn_init (expr_t, vinsn_t, int);
162
163static void cfg_preds (basic_block, insn_t **, int *);
164static void prepare_insn_expr (insn_t, int);
165static void free_history_vect (vec<expr_history_def> &);
166
167static void move_bb_info (basic_block, basic_block);
168static void remove_empty_bb (basic_block, bool);
169static void sel_merge_blocks (basic_block, basic_block);
170static void sel_remove_loop_preheader (void);
171static bool bb_has_removable_jump_to_p (basic_block, basic_block);
172
173static bool insn_is_the_only_one_in_bb_p (insn_t);
174static void create_initial_data_sets (basic_block);
175
176static void free_av_set (basic_block);
177static void invalidate_av_set (basic_block);
178static void extend_insn_data (void);
179static void sel_init_new_insn (insn_t, int, int = -1);
180static void finish_insns (void);
181
182/* Various list functions.  */
183
184/* Copy an instruction list L.  */
185ilist_t
186ilist_copy (ilist_t l)
187{
188  ilist_t head = NULL, *tailp = &head;
189
190  while (l)
191    {
192      ilist_add (tailp, ILIST_INSN (l));
193      tailp = &ILIST_NEXT (*tailp);
194      l = ILIST_NEXT (l);
195    }
196
197  return head;
198}
199
200/* Invert an instruction list L.  */
201ilist_t
202ilist_invert (ilist_t l)
203{
204  ilist_t res = NULL;
205
206  while (l)
207    {
208      ilist_add (&res, ILIST_INSN (l));
209      l = ILIST_NEXT (l);
210    }
211
212  return res;
213}
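
/* A small illustrative sketch (COPY and ORIG are placeholder names, not part
   of the pass): ilist_copy allocates fresh nodes, so clearing the copy does
   not disturb the original list:

     ilist_t copy = ilist_copy (orig);
     ... walk COPY ...
     ilist_clear (&copy);
 */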
214
215/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
216void
217blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
218{
219  bnd_t bnd;
220
221  _list_add (lp);
222  bnd = BLIST_BND (*lp);
223
224  BND_TO (bnd) = to;
225  BND_PTR (bnd) = ptr;
226  BND_AV (bnd) = NULL;
227  BND_AV1 (bnd) = NULL;
228  BND_DC (bnd) = dc;
229}
230
/* Remove the boundary pointed to by LP from the boundary list.  */
232void
233blist_remove (blist_t *lp)
234{
235  bnd_t b = BLIST_BND (*lp);
236
237  av_set_clear (&BND_AV (b));
238  av_set_clear (&BND_AV1 (b));
239  ilist_clear (&BND_PTR (b));
240
241  _list_remove (lp);
242}
243
244/* Init a fence tail L.  */
245void
246flist_tail_init (flist_tail_t l)
247{
248  FLIST_TAIL_HEAD (l) = NULL;
249  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
250}
251
252/* Try to find fence corresponding to INSN in L.  */
253fence_t
254flist_lookup (flist_t l, insn_t insn)
255{
256  while (l)
257    {
258      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
259	return FLIST_FENCE (l);
260
261      l = FLIST_NEXT (l);
262    }
263
264  return NULL;
265}
266
267/* Init the fields of F before running fill_insns.  */
268static void
269init_fence_for_scheduling (fence_t f)
270{
271  FENCE_BNDS (f) = NULL;
272  FENCE_PROCESSED_P (f) = false;
273  FENCE_SCHEDULED_P (f) = false;
274}
275
276/* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
277static void
278flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
279           insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
280           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
281           int cycle, int cycle_issued_insns, int issue_more,
282           bool starts_cycle_p, bool after_stall_p)
283{
284  fence_t f;
285
286  _list_add (lp);
287  f = FLIST_FENCE (*lp);
288
289  FENCE_INSN (f) = insn;
290
291  gcc_assert (state != NULL);
292  FENCE_STATE (f) = state;
293
294  FENCE_CYCLE (f) = cycle;
295  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
296  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
297  FENCE_AFTER_STALL_P (f) = after_stall_p;
298
299  gcc_assert (dc != NULL);
300  FENCE_DC (f) = dc;
301
302  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
303  FENCE_TC (f) = tc;
304
305  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
306  FENCE_ISSUE_MORE (f) = issue_more;
307  FENCE_EXECUTING_INSNS (f) = executing_insns;
308  FENCE_READY_TICKS (f) = ready_ticks;
309  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
310  FENCE_SCHED_NEXT (f) = sched_next;
311
312  init_fence_for_scheduling (f);
313}
314
315/* Remove the head node of the list pointed to by LP.  */
316static void
317flist_remove (flist_t *lp)
318{
319  if (FENCE_INSN (FLIST_FENCE (*lp)))
320    fence_clear (FLIST_FENCE (*lp));
321  _list_remove (lp);
322}
323
324/* Clear the fence list pointed to by LP.  */
325void
326flist_clear (flist_t *lp)
327{
328  while (*lp)
329    flist_remove (lp);
330}
331
/* Add ORIGINAL_INSN to the def list DL together with its CROSSES_CALL flag.  */
333void
334def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
335{
336  def_t d;
337
338  _list_add (dl);
339  d = DEF_LIST_DEF (*dl);
340
341  d->orig_insn = original_insn;
342  d->crosses_call = crosses_call;
343}
344
345
346/* Functions to work with target contexts.  */
347
348/* Bulk target context.  It is convenient for debugging purposes to ensure
349   that there are no uninitialized (null) target contexts.  */
350static tc_t bulk_tc = (tc_t) 1;
351
352/* Target hooks wrappers.  In the future we can provide some default
353   implementations for them.  */
354
355/* Allocate a store for the target context.  */
356static tc_t
357alloc_target_context (void)
358{
359  return (targetm.sched.alloc_sched_context
360	  ? targetm.sched.alloc_sched_context () : bulk_tc);
361}
362
/* Initialize the target context TC.
   If CLEAN_P is true, then make TC as it would be at the beginning of
   scheduling.  Otherwise, copy the current backend context to TC.  */
366static void
367init_target_context (tc_t tc, bool clean_p)
368{
369  if (targetm.sched.init_sched_context)
370    targetm.sched.init_sched_context (tc, clean_p);
371}
372
/* Allocate and initialize a target context.  The meaning of CLEAN_P is the
   same as in init_target_context ().  */
375tc_t
376create_target_context (bool clean_p)
377{
378  tc_t tc = alloc_target_context ();
379
380  init_target_context (tc, clean_p);
381  return tc;
382}
383
384/* Copy TC to the current backend context.  */
385void
386set_target_context (tc_t tc)
387{
388  if (targetm.sched.set_sched_context)
389    targetm.sched.set_sched_context (tc);
390}
391
392/* TC is about to be destroyed.  Free any internal data.  */
393static void
394clear_target_context (tc_t tc)
395{
396  if (targetm.sched.clear_sched_context)
397    targetm.sched.clear_sched_context (tc);
398}
399
/* Clear and free the target context TC.  */
401static void
402delete_target_context (tc_t tc)
403{
404  clear_target_context (tc);
405
406  if (targetm.sched.free_sched_context)
407    targetm.sched.free_sched_context (tc);
408}
409
/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
412static void
413copy_target_context (tc_t to, tc_t from)
414{
415  tc_t tmp = create_target_context (false);
416
417  set_target_context (from);
418  init_target_context (to, false);
419
420  set_target_context (tmp);
421  delete_target_context (tmp);
422}
423
424/* Create a copy of TC.  */
425static tc_t
426create_copy_of_target_context (tc_t tc)
427{
428  tc_t copy = alloc_target_context ();
429
430  copy_target_context (copy, tc);
431
432  return copy;
433}
434
435/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
436   is the same as in init_target_context ().  */
437void
438reset_target_context (tc_t tc, bool clean_p)
439{
440  clear_target_context (tc);
441  init_target_context (tc, clean_p);
442}
443
/* Functions to work with dependence contexts.
   A dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for a
   dependence context.  It accumulates information about processed insns so
   that we can decide whether the current insn depends on any of them.  */
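
/* A minimal usage sketch (illustrative only; INSN1/INSN2 are placeholders):
   a dependence context is created once, advanced over the insns already
   processed, and destroyed when its fence goes away:

     deps_t dc = create_deps_context ();
     advance_deps_context (dc, insn1);
     advance_deps_context (dc, insn2);
     ...
     delete_deps_context (dc);

   create_copy_of_deps_context () is used when several fences need their own
   private copy of the accumulated information.  */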
448
449/* Make a copy of FROM in TO.  */
450static void
451copy_deps_context (deps_t to, deps_t from)
452{
453  init_deps (to, false);
454  deps_join (to, from);
455}
456
457/* Allocate store for dep context.  */
458static deps_t
459alloc_deps_context (void)
460{
461  return XNEW (struct deps_desc);
462}
463
464/* Allocate and initialize dep context.  */
465static deps_t
466create_deps_context (void)
467{
468  deps_t dc = alloc_deps_context ();
469
470  init_deps (dc, false);
471  return dc;
472}
473
474/* Create a copy of FROM.  */
475static deps_t
476create_copy_of_deps_context (deps_t from)
477{
478  deps_t to = alloc_deps_context ();
479
480  copy_deps_context (to, from);
481  return to;
482}
483
484/* Clean up internal data of DC.  */
485static void
486clear_deps_context (deps_t dc)
487{
488  free_deps (dc);
489}
490
491/* Clear and free DC.  */
492static void
493delete_deps_context (deps_t dc)
494{
495  clear_deps_context (dc);
496  free (dc);
497}
498
499/* Clear and init DC.  */
500static void
501reset_deps_context (deps_t dc)
502{
503  clear_deps_context (dc);
504  init_deps (dc, false);
505}
506
507/* This structure describes the dependence analysis hooks for advancing
508   dependence context.  */
509static struct sched_deps_info_def advance_deps_context_sched_deps_info =
510  {
511    NULL,
512
513    NULL, /* start_insn */
514    NULL, /* finish_insn */
515    NULL, /* start_lhs */
516    NULL, /* finish_lhs */
517    NULL, /* start_rhs */
518    NULL, /* finish_rhs */
519    haifa_note_reg_set,
520    haifa_note_reg_clobber,
521    haifa_note_reg_use,
522    NULL, /* note_mem_dep */
523    NULL, /* note_dep */
524
525    0, 0, 0
526  };
527
528/* Process INSN and add its impact on DC.  */
529void
530advance_deps_context (deps_t dc, insn_t insn)
531{
532  sched_deps_info = &advance_deps_context_sched_deps_info;
533  deps_analyze_insn (dc, insn);
534}
535
536
537/* Functions to work with DFA states.  */
538
539/* Allocate store for a DFA state.  */
540static state_t
541state_alloc (void)
542{
543  return xmalloc (dfa_state_size);
544}
545
546/* Allocate and initialize DFA state.  */
547static state_t
548state_create (void)
549{
550  state_t state = state_alloc ();
551
552  state_reset (state);
553  advance_state (state);
554  return state;
555}
556
557/* Free DFA state.  */
558static void
559state_free (state_t state)
560{
561  free (state);
562}
563
564/* Make a copy of FROM in TO.  */
565static void
566state_copy (state_t to, state_t from)
567{
568  memcpy (to, from, dfa_state_size);
569}
570
571/* Create a copy of FROM.  */
572static state_t
573state_create_copy (state_t from)
574{
575  state_t to = state_alloc ();
576
577  state_copy (to, from);
578  return to;
579}
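
/* An illustrative pattern (TEMP and FENCE are placeholder names): copy the
   DFA state of a fence, experiment on the copy, and throw it away if the
   experiment fails, leaving the fence's own state untouched:

     state_t temp = state_create_copy (FENCE_STATE (fence));
     ... advance or transition TEMP ...
     state_free (temp);
 */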
580
581
582/* Functions to work with fences.  */
583
584/* Clear the fence.  */
585static void
586fence_clear (fence_t f)
587{
588  state_t s = FENCE_STATE (f);
589  deps_t dc = FENCE_DC (f);
590  void *tc = FENCE_TC (f);
591
592  ilist_clear (&FENCE_BNDS (f));
593
594  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
595	      || (s == NULL && dc == NULL && tc == NULL));
596
597  free (s);
598
599  if (dc != NULL)
600    delete_deps_context (dc);
601
602  if (tc != NULL)
603    delete_target_context (tc);
604  vec_free (FENCE_EXECUTING_INSNS (f));
605  free (FENCE_READY_TICKS (f));
606  FENCE_READY_TICKS (f) = NULL;
607}
608
609/* Init a list of fences with successors of OLD_FENCE.  */
610void
611init_fences (insn_t old_fence)
612{
613  insn_t succ;
614  succ_iterator si;
615  bool first = true;
616  int ready_ticks_size = get_max_uid () + 1;
617
618  FOR_EACH_SUCC_1 (succ, si, old_fence,
619                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
620    {
621
622      if (first)
623        first = false;
624      else
625        gcc_assert (flag_sel_sched_pipelining_outer_loops);
626
627      flist_add (&fences, succ,
628		 state_create (),
629		 create_deps_context () /* dc */,
630		 create_target_context (true) /* tc */,
631		 NULL /* last_scheduled_insn */,
632                 NULL, /* executing_insns */
633                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
634                 ready_ticks_size,
635                 NULL /* sched_next */,
636		 1 /* cycle */, 0 /* cycle_issued_insns */,
637		 issue_rate, /* issue_more */
638		 1 /* starts_cycle_p */, 0 /* after_stall_p */);
639    }
640}
641
/* Merge two fences (filling the fields of fence F with the resulting values)
   according to the following rules: 1) the state, target context and last
   scheduled insn are propagated from the fallthrough edge if it is available;
   2) the deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
651static void
652merge_fences (fence_t f, insn_t insn,
653	      state_t state, deps_t dc, void *tc,
654              rtx_insn *last_scheduled_insn,
655	      vec<rtx_insn *, va_gc> *executing_insns,
656              int *ready_ticks, int ready_ticks_size,
657	      rtx sched_next, int cycle, int issue_more, bool after_stall_p)
658{
659  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
660
661  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
662              && !sched_next && !FENCE_SCHED_NEXT (f));
663
  /* Check if we can decide from which path the fences came.
     If we can't (or don't want to), reset everything.  */
666  if (last_scheduled_insn == NULL
667      || last_scheduled_insn_old == NULL
      /* This is the case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         going right through it; in such a case just reset everything).  */
672      || last_scheduled_insn == last_scheduled_insn_old)
673    {
674      state_reset (FENCE_STATE (f));
675      state_free (state);
676
677      reset_deps_context (FENCE_DC (f));
678      delete_deps_context (dc);
679
680      reset_target_context (FENCE_TC (f), true);
681      delete_target_context (tc);
682
683      if (cycle > FENCE_CYCLE (f))
684        FENCE_CYCLE (f) = cycle;
685
686      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
687      FENCE_ISSUE_MORE (f) = issue_rate;
688      vec_free (executing_insns);
689      free (ready_ticks);
690      if (FENCE_EXECUTING_INSNS (f))
691        FENCE_EXECUTING_INSNS (f)->block_remove (0,
692					  FENCE_EXECUTING_INSNS (f)->length ());
693      if (FENCE_READY_TICKS (f))
694        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
695    }
696  else
697    {
698      edge edge_old = NULL, edge_new = NULL;
699      edge candidate;
700      succ_iterator si;
701      insn_t succ;
702
703      /* Find fallthrough edge.  */
704      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
705      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
706
707      if (!candidate
708          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
709              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
710        {
711          /* No fallthrough edge leading to basic block of INSN.  */
712          state_reset (FENCE_STATE (f));
713          state_free (state);
714
715          reset_target_context (FENCE_TC (f), true);
716          delete_target_context (tc);
717
718          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
719	  FENCE_ISSUE_MORE (f) = issue_rate;
720        }
721      else
722        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
723          {
            /* It would be weird if the same insn were the successor of
               several fallthrough edges.  */
726            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
727                        != BLOCK_FOR_INSN (last_scheduled_insn_old));
728
729            state_free (FENCE_STATE (f));
730            FENCE_STATE (f) = state;
731
732            delete_target_context (FENCE_TC (f));
733            FENCE_TC (f) = tc;
734
735            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
736	    FENCE_ISSUE_MORE (f) = issue_more;
737          }
738        else
739          {
740            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
741            state_free (state);
742            delete_target_context (tc);
743
744            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
745                        != BLOCK_FOR_INSN (last_scheduled_insn));
746          }
747
748        /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
749        FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
750                         SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
751          {
752            if (succ == insn)
753              {
754                /* No same successor allowed from several edges.  */
755                gcc_assert (!edge_old);
756                edge_old = si.e1;
757              }
758          }
759        /* Find edge of second predecessor (last_scheduled_insn->insn).  */
760        FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
761                         SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
762          {
763            if (succ == insn)
764              {
765                /* No same successor allowed from several edges.  */
766                gcc_assert (!edge_new);
767                edge_new = si.e1;
768              }
769          }
770
        /* Check if we can choose the most probable predecessor.  */
772        if (edge_old == NULL || edge_new == NULL)
773          {
774            reset_deps_context (FENCE_DC (f));
775            delete_deps_context (dc);
776            vec_free (executing_insns);
777            free (ready_ticks);
778
779            FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
780            if (FENCE_EXECUTING_INSNS (f))
781              FENCE_EXECUTING_INSNS (f)->block_remove (0,
782                                FENCE_EXECUTING_INSNS (f)->length ());
783            if (FENCE_READY_TICKS (f))
784              memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
785          }
786        else
787          if (edge_new->probability > edge_old->probability)
788            {
789              delete_deps_context (FENCE_DC (f));
790              FENCE_DC (f) = dc;
791              vec_free (FENCE_EXECUTING_INSNS (f));
792              FENCE_EXECUTING_INSNS (f) = executing_insns;
793              free (FENCE_READY_TICKS (f));
794              FENCE_READY_TICKS (f) = ready_ticks;
795              FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
796              FENCE_CYCLE (f) = cycle;
797            }
798          else
799            {
800              /* Leave DC and CYCLE untouched.  */
801              delete_deps_context (dc);
802              vec_free (executing_insns);
803              free (ready_ticks);
804            }
805    }
806
807  /* Fill remaining invariant fields.  */
808  if (after_stall_p)
809    FENCE_AFTER_STALL_P (f) = 1;
810
811  FENCE_ISSUED_INSNS (f) = 0;
812  FENCE_STARTS_CYCLE_P (f) = 1;
813  FENCE_SCHED_NEXT (f) = NULL;
814}
815
816/* Add a new fence to NEW_FENCES list, initializing it from all
817   other parameters.  */
818static void
819add_to_fences (flist_tail_t new_fences, insn_t insn,
820               state_t state, deps_t dc, void *tc,
821	       rtx_insn *last_scheduled_insn,
822               vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
823               int ready_ticks_size, rtx_insn *sched_next, int cycle,
824               int cycle_issued_insns, int issue_rate,
825	       bool starts_cycle_p, bool after_stall_p)
826{
827  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
828
829  if (! f)
830    {
831      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
832		 last_scheduled_insn, executing_insns, ready_ticks,
833                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
834		 issue_rate, starts_cycle_p, after_stall_p);
835
836      FLIST_TAIL_TAILP (new_fences)
837	= &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
838    }
839  else
840    {
841      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
842                    executing_insns, ready_ticks, ready_ticks_size,
843                    sched_next, cycle, issue_rate, after_stall_p);
844    }
845}
846
847/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
848void
849move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
850{
851  fence_t f, old;
852  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
853
854  old = FLIST_FENCE (old_fences);
855  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
856                    FENCE_INSN (FLIST_FENCE (old_fences)));
857  if (f)
858    {
859      merge_fences (f, old->insn, old->state, old->dc, old->tc,
860                    old->last_scheduled_insn, old->executing_insns,
861                    old->ready_ticks, old->ready_ticks_size,
862                    old->sched_next, old->cycle, old->issue_more,
863                    old->after_stall_p);
864    }
865  else
866    {
867      _list_add (tailp);
868      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
869      *FLIST_FENCE (*tailp) = *old;
870      init_fence_for_scheduling (FLIST_FENCE (*tailp));
871    }
872  FENCE_INSN (old) = NULL;
873}
874
/* Add a new fence to the NEW_FENCES list and initialize most of its data
   as for a clean fence.  */
877void
878add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
879{
880  int ready_ticks_size = get_max_uid () + 1;
881
882  add_to_fences (new_fences,
883                 succ, state_create (), create_deps_context (),
884                 create_target_context (true),
885                 NULL, NULL,
886                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
887                 NULL, FENCE_CYCLE (fence) + 1,
888                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
889}
890
891/* Add a new fence to NEW_FENCES list and initialize all of its data
892   from FENCE and SUCC.  */
893void
894add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
895{
896  int * new_ready_ticks
897    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
898
899  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
900          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
901  add_to_fences (new_fences,
902                 succ, state_create_copy (FENCE_STATE (fence)),
903                 create_copy_of_deps_context (FENCE_DC (fence)),
904                 create_copy_of_target_context (FENCE_TC (fence)),
905                 FENCE_LAST_SCHEDULED_INSN (fence),
906		 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
907                 new_ready_ticks,
908                 FENCE_READY_TICKS_SIZE (fence),
909                 FENCE_SCHED_NEXT (fence),
910                 FENCE_CYCLE (fence),
911                 FENCE_ISSUED_INSNS (fence),
912		 FENCE_ISSUE_MORE (fence),
913                 FENCE_STARTS_CYCLE_P (fence),
914                 FENCE_AFTER_STALL_P (fence));
915}
916
917
918/* Functions to work with regset and nop pools.  */
919
/* Return a new regset from the pool.  It might have some of the bits set
   from its previous use.  */
922regset
923get_regset_from_pool (void)
924{
925  regset rs;
926
927  if (regset_pool.n != 0)
928    rs = regset_pool.v[--regset_pool.n];
929  else
930    /* We need to create the regset.  */
931    {
932      rs = ALLOC_REG_SET (&reg_obstack);
933
934      if (regset_pool.nn == regset_pool.ss)
935	regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
936                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
937      regset_pool.vv[regset_pool.nn++] = rs;
938    }
939
940  regset_pool.diff++;
941
942  return rs;
943}
944
945/* Same as above, but returns the empty regset.  */
946regset
947get_clear_regset_from_pool (void)
948{
949  regset rs = get_regset_from_pool ();
950
951  CLEAR_REG_SET (rs);
952  return rs;
953}
954
955/* Return regset RS to the pool for future use.  */
956void
957return_regset_to_pool (regset rs)
958{
959  gcc_assert (rs);
960  regset_pool.diff--;
961
962  if (regset_pool.n == regset_pool.s)
963    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
964                                (regset_pool.s = 2 * regset_pool.s + 1));
965  regset_pool.v[regset_pool.n++] = rs;
966}
967
968#ifdef ENABLE_CHECKING
969/* This is used as a qsort callback for sorting regset pool stacks.
970   X and XX are addresses of two regsets.  They are never equal.  */
971static int
972cmp_v_in_regset_pool (const void *x, const void *xx)
973{
974  uintptr_t r1 = (uintptr_t) *((const regset *) x);
975  uintptr_t r2 = (uintptr_t) *((const regset *) xx);
976  if (r1 > r2)
977    return 1;
978  else if (r1 < r2)
979    return -1;
980  gcc_unreachable ();
981}
982#endif
983
/* Free the regset pool, possibly checking for memory leaks.  */
985void
986free_regset_pool (void)
987{
988#ifdef ENABLE_CHECKING
989  {
990    regset *v = regset_pool.v;
991    int i = 0;
992    int n = regset_pool.n;
993
994    regset *vv = regset_pool.vv;
995    int ii = 0;
996    int nn = regset_pool.nn;
997
998    int diff = 0;
999
1000    gcc_assert (n <= nn);
1001
1002    /* Sort both vectors so it will be possible to compare them.  */
1003    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
1004    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
1005
1006    while (ii < nn)
1007      {
1008        if (v[i] == vv[ii])
1009          i++;
1010        else
1011          /* VV[II] was lost.  */
1012          diff++;
1013
1014        ii++;
1015      }
1016
1017    gcc_assert (diff == regset_pool.diff);
1018  }
1019#endif
1020
  /* If this is not true, we have a memory leak.  */
1022  gcc_assert (regset_pool.diff == 0);
1023
1024  while (regset_pool.n)
1025    {
1026      --regset_pool.n;
1027      FREE_REG_SET (regset_pool.v[regset_pool.n]);
1028    }
1029
1030  free (regset_pool.v);
1031  regset_pool.v = NULL;
1032  regset_pool.s = 0;
1033
1034  free (regset_pool.vv);
1035  regset_pool.vv = NULL;
1036  regset_pool.nn = 0;
1037  regset_pool.ss = 0;
1038
1039  regset_pool.diff = 0;
1040}
1041
1042
1043/* Functions to work with nop pools.  NOP insns are used as temporary
1044   placeholders of the insns being scheduled to allow correct update of
1045   the data sets.  When update is finished, NOPs are deleted.  */
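
/* Illustrative lifecycle (not a verbatim excerpt of the pass): a nop is
   emitted as a placeholder while the data sets are updated and is then
   recycled:

     insn_t nop = get_nop_from_pool (insn);
     ... update the availability / liveness sets ...
     return_nop_to_pool (nop, true);

   The vector backing the pool is released at the end of scheduling by
   free_nop_pool ().  */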
1046
1047/* A vinsn that is used to represent a nop.  This vinsn is shared among all
1048   nops sel-sched generates.  */
1049static vinsn_t nop_vinsn = NULL;
1050
1051/* Emit a nop before INSN, taking it from pool.  */
1052insn_t
1053get_nop_from_pool (insn_t insn)
1054{
1055  rtx nop_pat;
1056  insn_t nop;
1057  bool old_p = nop_pool.n != 0;
1058  int flags;
1059
1060  if (old_p)
1061    nop_pat = nop_pool.v[--nop_pool.n];
1062  else
1063    nop_pat = nop_pattern;
1064
1065  nop = emit_insn_before (nop_pat, insn);
1066
1067  if (old_p)
1068    flags = INSN_INIT_TODO_SSID;
1069  else
1070    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1071
1072  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1073  sel_init_new_insn (nop, flags);
1074
1075  return nop;
1076}
1077
1078/* Remove NOP from the instruction stream and return it to the pool.  */
1079void
1080return_nop_to_pool (insn_t nop, bool full_tidying)
1081{
1082  gcc_assert (INSN_IN_STREAM_P (nop));
1083  sel_remove_insn (nop, false, full_tidying);
1084
1085  /* We'll recycle this nop.  */
1086  nop->set_undeleted ();
1087
1088  if (nop_pool.n == nop_pool.s)
1089    nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
1090                             (nop_pool.s = 2 * nop_pool.s + 1));
1091  nop_pool.v[nop_pool.n++] = nop;
1092}
1093
1094/* Free the nop pool.  */
1095void
1096free_nop_pool (void)
1097{
1098  nop_pool.n = 0;
1099  nop_pool.s = 0;
1100  free (nop_pool.v);
1101  nop_pool.v = NULL;
1102}
1103
1104
/* Skip an UNSPEC to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY when one of them needs to be skipped.  */
1108static int
1109skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1110{
1111  const_rtx x = *xx;
1112  const_rtx y = *yy;
1113
1114  if (GET_CODE (x) == UNSPEC
1115      && (targetm.sched.skip_rtx_p == NULL
1116          || targetm.sched.skip_rtx_p (x)))
1117    {
1118      *nx = XVECEXP (x, 0, 0);
1119      *ny = CONST_CAST_RTX (y);
1120      return 1;
1121    }
1122
1123  if (GET_CODE (y) == UNSPEC
1124      && (targetm.sched.skip_rtx_p == NULL
1125          || targetm.sched.skip_rtx_p (y)))
1126    {
1127      *nx = CONST_CAST_RTX (x);
1128      *ny = XVECEXP (y, 0, 0);
1129      return 1;
1130    }
1131
1132  return 0;
1133}
1134
/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X correctly
   to support ia64 speculation.  When changes are needed, the new rtx NX and
   the new mode NMODE are written, and the callback returns true.  */
1138static int
1139hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
1140                           rtx *nx, machine_mode* nmode)
1141{
1142  if (GET_CODE (x) == UNSPEC
1143      && targetm.sched.skip_rtx_p
1144      && targetm.sched.skip_rtx_p (x))
1145    {
      *nx = XVECEXP (x, 0, 0);
1147      *nmode = VOIDmode;
1148      return 1;
1149    }
1150
1151  return 0;
1152}
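
/* For illustration (a sketch, with N standing for an arbitrary unspec
   number): on a target whose skip_rtx_p hook accepts the UNSPEC, the two
   callbacks above let rtx_equal_p_cb treat

     (unspec:DI [(reg:DI r1)] N)   and   (reg:DI r1)

   as equal, and let hash_rtx_cb hash the UNSPEC by its first operand, so a
   speculative pattern can be matched against its non-speculative form.  */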
1153
/* Return true if LHS and RHS are OK to be scheduled separately.  */
1155static bool
1156lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1157{
1158  if (lhs == NULL || rhs == NULL)
1159    return false;
1160
  /* Do not schedule constants as rhs: there is no point in using a register
     if a constant can be used directly.  Moreover, scheduling a constant as
     an rhs may lead to a mode mismatch, because constants don't have modes,
     but they could be merged from branches where the same constant is used
     in different modes.  */
1165  if (CONSTANT_P (rhs))
1166    return false;
1167
1168  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
1169  if (COMPARISON_P (rhs))
1170      return false;
1171
1172  /* Do not allow single REG to be an rhs.  */
1173  if (REG_P (rhs))
1174    return false;
1175
1176  /* See comment at find_used_regs_1 (*1) for explanation of this
1177     restriction.  */
1178  /* FIXME: remove this later.  */
1179  if (MEM_P (lhs))
1180    return false;
1181
1182  /* This will filter all tricky things like ZERO_EXTRACT etc.
1183     For now we don't handle it.  */
1184  if (!REG_P (lhs) && !MEM_P (lhs))
1185    return false;
1186
1187  return true;
1188}
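
/* For illustration (a sketch, not an exhaustive list; R1-R3 are placeholder
   registers): with the checks above,

     (set (reg:SI r1) (plus:SI (reg:SI r2) (reg:SI r3)))

   has a separable lhs/rhs, while

     (set (reg:SI r1) (reg:SI r2))            -- rhs is a bare REG
     (set (reg:SI r1) (const_int 0))          -- rhs is a constant
     (set (mem:SI (reg:SI r1)) (reg:SI r2))   -- lhs is a MEM

   are all rejected.  */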
1189
1190/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
1191   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
1192   used e.g. for insns from recovery blocks.  */
1193static void
1194vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1195{
1196  hash_rtx_callback_function hrcf;
1197  int insn_class;
1198
1199  VINSN_INSN_RTX (vi) = insn;
1200  VINSN_COUNT (vi) = 0;
1201  vi->cost = -1;
1202
1203  if (INSN_NOP_P (insn))
1204    return;
1205
1206  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1207    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1208  else
1209    deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1210
1211  /* Hash vinsn depending on whether it is separable or not.  */
1212  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1213  if (VINSN_SEPARABLE_P (vi))
1214    {
1215      rtx rhs = VINSN_RHS (vi);
1216
1217      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1218                                     NULL, NULL, false, hrcf);
1219      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1220                                         VOIDmode, NULL, NULL,
1221                                         false, hrcf);
1222    }
1223  else
1224    {
1225      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1226                                     NULL, NULL, false, hrcf);
1227      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1228    }
1229
1230  insn_class = haifa_classify_insn (insn);
1231  if (insn_class >= 2
1232      && (!targetm.sched.get_insn_spec_ds
1233          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1234              == 0)))
1235    VINSN_MAY_TRAP_P (vi) = true;
1236  else
1237    VINSN_MAY_TRAP_P (vi) = false;
1238}
1239
1240/* Indicate that VI has become the part of an rtx object.  */
1241void
1242vinsn_attach (vinsn_t vi)
1243{
1244  /* Assert that VI is not pending for deletion.  */
1245  gcc_assert (VINSN_INSN_RTX (vi));
1246
1247  VINSN_COUNT (vi)++;
1248}
1249
/* Create and initialize a vinsn from INSN.  Use FORCE_UNIQUE_P for
   determining the correct VINSN_TYPE of the new vinsn.  */
1252static vinsn_t
1253vinsn_create (insn_t insn, bool force_unique_p)
1254{
1255  vinsn_t vi = XCNEW (struct vinsn_def);
1256
1257  vinsn_init (vi, insn, force_unique_p);
1258  return vi;
1259}
1260
1261/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
1262   the copy.  */
1263vinsn_t
1264vinsn_copy (vinsn_t vi, bool reattach_p)
1265{
1266  rtx_insn *copy;
1267  bool unique = VINSN_UNIQUE_P (vi);
1268  vinsn_t new_vi;
1269
1270  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1271  new_vi = create_vinsn_from_insn_rtx (copy, unique);
1272  if (reattach_p)
1273    {
1274      vinsn_detach (vi);
1275      vinsn_attach (new_vi);
1276    }
1277
1278  return new_vi;
1279}
1280
1281/* Delete the VI vinsn and free its data.  */
1282static void
1283vinsn_delete (vinsn_t vi)
1284{
1285  gcc_assert (VINSN_COUNT (vi) == 0);
1286
1287  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1288    {
1289      return_regset_to_pool (VINSN_REG_SETS (vi));
1290      return_regset_to_pool (VINSN_REG_USES (vi));
1291      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1292    }
1293
1294  free (vi);
1295}
1296
1297/* Indicate that VI is no longer a part of some rtx object.
1298   Remove VI if it is no longer needed.  */
1299void
1300vinsn_detach (vinsn_t vi)
1301{
1302  gcc_assert (VINSN_COUNT (vi) > 0);
1303
1304  if (--VINSN_COUNT (vi) == 0)
1305    vinsn_delete (vi);
1306}
1307
1308/* Returns TRUE if VI is a branch.  */
1309bool
1310vinsn_cond_branch_p (vinsn_t vi)
1311{
1312  insn_t insn;
1313
1314  if (!VINSN_UNIQUE_P (vi))
1315    return false;
1316
1317  insn = VINSN_INSN_RTX (vi);
1318  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1319    return false;
1320
1321  return control_flow_insn_p (insn);
1322}
1323
1324/* Return latency of INSN.  */
1325static int
1326sel_insn_rtx_cost (rtx_insn *insn)
1327{
1328  int cost;
1329
1330  /* A USE insn, or something else we don't need to
1331     understand.  We can't pass these directly to
1332     result_ready_cost or insn_default_latency because it will
1333     trigger a fatal error for unrecognizable insns.  */
1334  if (recog_memoized (insn) < 0)
1335    cost = 0;
1336  else
1337    {
1338      cost = insn_default_latency (insn);
1339
1340      if (cost < 0)
1341	cost = 0;
1342    }
1343
1344  return cost;
1345}
1346
1347/* Return the cost of the VI.
1348   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
1349int
1350sel_vinsn_cost (vinsn_t vi)
1351{
1352  int cost = vi->cost;
1353
1354  if (cost < 0)
1355    {
1356      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1357      vi->cost = cost;
1358    }
1359
1360  return cost;
1361}
1362
1363
1364/* Functions for insn emitting.  */
1365
1366/* Emit new insn after AFTER based on PATTERN and initialize its data from
1367   EXPR and SEQNO.  */
1368insn_t
1369sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1370{
1371  insn_t new_insn;
1372
1373  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1374
1375  new_insn = emit_insn_after (pattern, after);
1376  set_insn_init (expr, NULL, seqno);
1377  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1378
1379  return new_insn;
1380}
1381
1382/* Force newly generated vinsns to be unique.  */
1383static bool init_insn_force_unique_p = false;
1384
1385/* Emit new speculation recovery insn after AFTER based on PATTERN and
1386   initialize its data from EXPR and SEQNO.  */
1387insn_t
1388sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1389				      insn_t after)
1390{
1391  insn_t insn;
1392
1393  gcc_assert (!init_insn_force_unique_p);
1394
1395  init_insn_force_unique_p = true;
1396  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1397  CANT_MOVE (insn) = 1;
1398  init_insn_force_unique_p = false;
1399
1400  return insn;
1401}
1402
1403/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
1404   take it as a new vinsn instead of EXPR's vinsn.
1405   We simplify insns later, after scheduling region in
1406   simplify_changed_insns.  */
1407insn_t
1408sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1409                              insn_t after)
1410{
1411  expr_t emit_expr;
1412  insn_t insn;
1413  int flags;
1414
1415  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1416                             seqno);
1417  insn = EXPR_INSN_RTX (emit_expr);
1418
1419  /* The insn may come from the transformation cache, which may hold already
1420     deleted insns, so mark it as not deleted.  */
1421  insn->set_undeleted ();
1422
1423  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1424
1425  flags = INSN_INIT_TODO_SSID;
1426  if (INSN_LUID (insn) == 0)
1427    flags |= INSN_INIT_TODO_LUID;
1428  sel_init_new_insn (insn, flags);
1429
1430  return insn;
1431}
1432
1433/* Move insn from EXPR after AFTER.  */
1434insn_t
1435sel_move_insn (expr_t expr, int seqno, insn_t after)
1436{
1437  insn_t insn = EXPR_INSN_RTX (expr);
1438  basic_block bb = BLOCK_FOR_INSN (after);
1439  insn_t next = NEXT_INSN (after);
1440
1441  /* Assert that in move_op we disconnected this insn properly.  */
1442  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1443  SET_PREV_INSN (insn) = after;
1444  SET_NEXT_INSN (insn) = next;
1445
1446  SET_NEXT_INSN (after) = insn;
1447  SET_PREV_INSN (next) = insn;
1448
1449  /* Update links from insn to bb and vice versa.  */
1450  df_insn_change_bb (insn, bb);
1451  if (BB_END (bb) == after)
1452    BB_END (bb) = insn;
1453
1454  prepare_insn_expr (insn, seqno);
1455  return insn;
1456}
1457
1458
1459/* Functions to work with right-hand sides.  */
1460
/* Search for an entry determined by UID/NEW_VINSN in the sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index at which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
1466static bool
1467find_in_history_vect_1 (vec<expr_history_def> vect,
1468                        unsigned uid, vinsn_t new_vinsn,
1469                        bool compare_vinsns, int *indp)
1470{
1471  expr_history_def *arr;
1472  int i, j, len = vect.length ();
1473
1474  if (len == 0)
1475    {
1476      *indp = 0;
1477      return false;
1478    }
1479
1480  arr = vect.address ();
1481  i = 0, j = len - 1;
1482
1483  while (i <= j)
1484    {
1485      unsigned auid = arr[i].uid;
1486      vinsn_t avinsn = arr[i].new_expr_vinsn;
1487
1488      if (auid == uid
1489          /* When undoing transformation on a bookkeeping copy, the new vinsn
1490             may not be exactly equal to the one that is saved in the vector.
1491             This is because the insn whose copy we're checking was possibly
1492             substituted itself.  */
1493          && (! compare_vinsns
1494              || vinsn_equal_p (avinsn, new_vinsn)))
1495        {
1496          *indp = i;
1497          return true;
1498        }
1499      else if (auid > uid)
1500        break;
1501      i++;
1502    }
1503
1504  *indp = i;
1505  return false;
1506}
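
/* For example (illustrative): if VECT holds entries with UIDs {3, 7, 10}
   and we search for UID 8 with COMPARE_VINSNS false, the loop above stops
   at the entry with UID 10, returns false, and sets *INDP to 2 -- the
   position at which an entry for UID 8 can be inserted while keeping the
   vector sorted.  */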
1507
/* Search for the UID of INSN together with NEW_VINSN in the sorted vector
   VECT.  Return the position found, or -1 if no such value is in the vector.
   If ORIGINATORS_P is true, also search for the UIDs of the insn's
   originators.  */
1511int
1512find_in_history_vect (vec<expr_history_def> vect, rtx insn,
1513                      vinsn_t new_vinsn, bool originators_p)
1514{
1515  int ind;
1516
1517  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1518                              false, &ind))
1519    return ind;
1520
1521  if (INSN_ORIGINATORS (insn) && originators_p)
1522    {
1523      unsigned uid;
1524      bitmap_iterator bi;
1525
1526      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1527        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1528          return ind;
1529    }
1530
1531  return -1;
1532}
1533
1534/* Insert new element in a sorted history vector pointed to by PVECT,
1535   if it is not there already.  The element is searched using
1536   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
1537   the history of a transformation.  */
1538void
1539insert_in_history_vect (vec<expr_history_def> *pvect,
1540                        unsigned uid, enum local_trans_type type,
1541                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1542                        ds_t spec_ds)
1543{
1544  vec<expr_history_def> vect = *pvect;
1545  expr_history_def temp;
1546  bool res;
1547  int ind;
1548
1549  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1550
1551  if (res)
1552    {
1553      expr_history_def *phist = &vect[ind];
1554
1555      /* It is possible that speculation types of expressions that were
1556         propagated through different paths will be different here.  In this
1557         case, merge the status to get the correct check later.  */
1558      if (phist->spec_ds != spec_ds)
1559        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1560      return;
1561    }
1562
1563  temp.uid = uid;
1564  temp.old_expr_vinsn = old_expr_vinsn;
1565  temp.new_expr_vinsn = new_expr_vinsn;
1566  temp.spec_ds = spec_ds;
1567  temp.type = type;
1568
1569  vinsn_attach (old_expr_vinsn);
1570  vinsn_attach (new_expr_vinsn);
1571  vect.safe_insert (ind, temp);
1572  *pvect = vect;
1573}
1574
1575/* Free history vector PVECT.  */
1576static void
1577free_history_vect (vec<expr_history_def> &pvect)
1578{
1579  unsigned i;
1580  expr_history_def *phist;
1581
1582  if (! pvect.exists ())
1583    return;
1584
1585  for (i = 0; pvect.iterate (i, &phist); i++)
1586    {
1587      vinsn_detach (phist->old_expr_vinsn);
1588      vinsn_detach (phist->new_expr_vinsn);
1589    }
1590
1591  pvect.release ();
1592}
1593
/* Merge the history vector FROM into the vector pointed to by PVECT.  */
1595static void
1596merge_history_vect (vec<expr_history_def> *pvect,
1597		    vec<expr_history_def> from)
1598{
1599  expr_history_def *phist;
1600  int i;
1601
1602  /* We keep this vector sorted.  */
1603  for (i = 0; from.iterate (i, &phist); i++)
1604    insert_in_history_vect (pvect, phist->uid, phist->type,
1605                            phist->old_expr_vinsn, phist->new_expr_vinsn,
1606                            phist->spec_ds);
1607}
1608
1609/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
1610bool
1611vinsn_equal_p (vinsn_t x, vinsn_t y)
1612{
1613  rtx_equal_p_callback_function repcf;
1614
1615  if (x == y)
1616    return true;
1617
1618  if (VINSN_TYPE (x) != VINSN_TYPE (y))
1619    return false;
1620
1621  if (VINSN_HASH (x) != VINSN_HASH (y))
1622    return false;
1623
1624  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1625  if (VINSN_SEPARABLE_P (x))
1626    {
1627      /* Compare RHSes of VINSNs.  */
1628      gcc_assert (VINSN_RHS (x));
1629      gcc_assert (VINSN_RHS (y));
1630
1631      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1632    }
1633
1634  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1635}
1636
1637
1638/* Functions for working with expressions.  */
1639
1640/* Initialize EXPR.  */
1641static void
1642init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1643	   int sched_times, int orig_bb_index, ds_t spec_done_ds,
1644	   ds_t spec_to_check_ds, int orig_sched_cycle,
1645	   vec<expr_history_def> history,
1646	   signed char target_available,
1647           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1648           bool cant_move)
1649{
1650  vinsn_attach (vi);
1651
1652  EXPR_VINSN (expr) = vi;
1653  EXPR_SPEC (expr) = spec;
1654  EXPR_USEFULNESS (expr) = use;
1655  EXPR_PRIORITY (expr) = priority;
1656  EXPR_PRIORITY_ADJ (expr) = 0;
1657  EXPR_SCHED_TIMES (expr) = sched_times;
1658  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1659  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1660  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1661  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1662
1663  if (history.exists ())
1664    EXPR_HISTORY_OF_CHANGES (expr) = history;
1665  else
1666    EXPR_HISTORY_OF_CHANGES (expr).create (0);
1667
1668  EXPR_TARGET_AVAILABLE (expr) = target_available;
1669  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1670  EXPR_WAS_RENAMED (expr) = was_renamed;
1671  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1672  EXPR_CANT_MOVE (expr) = cant_move;
1673}
1674
1675/* Make a copy of the expr FROM into the expr TO.  */
1676void
1677copy_expr (expr_t to, expr_t from)
1678{
1679  vec<expr_history_def> temp = vNULL;
1680
1681  if (EXPR_HISTORY_OF_CHANGES (from).exists ())
1682    {
1683      unsigned i;
1684      expr_history_def *phist;
1685
1686      temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
1687      for (i = 0;
1688           temp.iterate (i, &phist);
1689           i++)
1690        {
1691          vinsn_attach (phist->old_expr_vinsn);
1692          vinsn_attach (phist->new_expr_vinsn);
1693        }
1694    }
1695
1696  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1697             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1698	     EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1699	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1700	     EXPR_ORIG_SCHED_CYCLE (from), temp,
1701             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1702             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1703             EXPR_CANT_MOVE (from));
1704}
1705
/* Same, but the final expr will never be in av sets, so don't copy
   "uninteresting" data such as the bitmap cache.  */
1708void
1709copy_expr_onside (expr_t to, expr_t from)
1710{
1711  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1712	     EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1713	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
1714	     vNULL,
1715	     EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1716	     EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1717             EXPR_CANT_MOVE (from));
1718}
1719
1720/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
1721   initializing new insns.  */
1722static void
1723prepare_insn_expr (insn_t insn, int seqno)
1724{
1725  expr_t expr = INSN_EXPR (insn);
1726  ds_t ds;
1727
1728  INSN_SEQNO (insn) = seqno;
1729  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1730  EXPR_SPEC (expr) = 0;
1731  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1732  EXPR_WAS_SUBSTITUTED (expr) = 0;
1733  EXPR_WAS_RENAMED (expr) = 0;
1734  EXPR_TARGET_AVAILABLE (expr) = 1;
1735  INSN_LIVE_VALID_P (insn) = false;
1736
1737  /* ??? If this expression is speculative, make its dependence
1738     as weak as possible.  We can filter this expression later
1739     in process_spec_exprs, because we do not distinguish
1740     between the status we got during compute_av_set and the
1741     existing status.  To be fixed.  */
1742  ds = EXPR_SPEC_DONE_DS (expr);
1743  if (ds)
1744    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1745
1746  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1747}
1748
1749/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
1750   is non-null when expressions are merged from different successors at
1751   a split point.  */
1752static void
1753update_target_availability (expr_t to, expr_t from, insn_t split_point)
1754{
1755  if (EXPR_TARGET_AVAILABLE (to) < 0
1756      || EXPR_TARGET_AVAILABLE (from) < 0)
1757    EXPR_TARGET_AVAILABLE (to) = -1;
1758  else
1759    {
1760      /* We try to detect the case when one of the expressions
1761         can only be reached through another one.  In this case,
1762         we can do better.  */
1763      if (split_point == NULL)
1764        {
1765          int toind, fromind;
1766
1767          toind = EXPR_ORIG_BB_INDEX (to);
1768          fromind = EXPR_ORIG_BB_INDEX (from);
1769
1770          if (toind && toind == fromind)
1771            /* Do nothing -- everything is done in
1772               merge_with_other_exprs.  */
1773            ;
1774          else
1775            EXPR_TARGET_AVAILABLE (to) = -1;
1776        }
1777      else if (EXPR_TARGET_AVAILABLE (from) == 0
1778	       && EXPR_LHS (from)
1779	       && REG_P (EXPR_LHS (from))
1780	       && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1781	EXPR_TARGET_AVAILABLE (to) = -1;
1782      else
1783        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1784    }
1785}
1786
1787/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
1788   is non-null when expressions are merged from different successors at
1789   a split point.  */
1790static void
1791update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1792{
1793  ds_t old_to_ds, old_from_ds;
1794
1795  old_to_ds = EXPR_SPEC_DONE_DS (to);
1796  old_from_ds = EXPR_SPEC_DONE_DS (from);
1797
1798  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1799  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1800  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1801
1802  /* When merging e.g. control & data speculative exprs, or a control
1803     speculative with a control&data speculative one, we really have
1804     to change vinsn too.  Also, when speculative status is changed,
1805     we also need to record this as a transformation in expr's history.  */
1806  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1807    {
1808      old_to_ds = ds_get_speculation_types (old_to_ds);
1809      old_from_ds = ds_get_speculation_types (old_from_ds);
1810
1811      if (old_to_ds != old_from_ds)
1812        {
1813          ds_t record_ds;
1814
1815          /* When both expressions are speculative, we need to change
1816             the vinsn first.  */
1817          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1818            {
1819              int res;
1820
1821              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1822              gcc_assert (res >= 0);
1823            }
1824
1825          if (split_point != NULL)
1826            {
1827              /* Record the change with proper status.  */
1828              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1829              record_ds &= ~(old_to_ds & SPECULATIVE);
1830              record_ds &= ~(old_from_ds & SPECULATIVE);
1831
1832              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1833                                      INSN_UID (split_point), TRANS_SPECULATION,
1834                                      EXPR_VINSN (from), EXPR_VINSN (to),
1835                                      record_ds);
1836            }
1837        }
1838    }
1839}
1840
1841
1842/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
1843   this is done along different paths.  */
1844void
1845merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1846{
1847  /* Choose the maximum of the specs of merged exprs.  This is required
1848     for correctness of bookkeeping.  */
1849  if (EXPR_SPEC (to) < EXPR_SPEC (from))
1850    EXPR_SPEC (to) = EXPR_SPEC (from);
1851
1852  if (split_point)
1853    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1854  else
1855    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1856                                EXPR_USEFULNESS (from));
1857
1858  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1859    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1860
1861  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1862    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1863
1864  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1865    EXPR_ORIG_BB_INDEX (to) = 0;
1866
1867  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1868                                    EXPR_ORIG_SCHED_CYCLE (from));
1869
1870  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1871  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1872  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1873
1874  merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1875		      EXPR_HISTORY_OF_CHANGES (from));
1876  update_target_availability (to, from, split_point);
1877  update_speculative_bits (to, from, split_point);
1878}
1879
1880/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
1881   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
1882   are merged from different successors at a split point.  */
1883void
1884merge_expr (expr_t to, expr_t from, insn_t split_point)
1885{
1886  vinsn_t to_vi = EXPR_VINSN (to);
1887  vinsn_t from_vi = EXPR_VINSN (from);
1888
1889  gcc_assert (vinsn_equal_p (to_vi, from_vi));
1890
1891  /* Make sure that speculative pattern is propagated into exprs that
1892     have non-speculative one.  This will provide us with consistent
1893     speculative bits and speculative patterns inside expr.  */
1894  if (EXPR_SPEC_DONE_DS (to) == 0
1895      && (EXPR_SPEC_DONE_DS (from) != 0
1896	  /* Do likewise for volatile insns, so that we always retain
1897	     the may_trap_p bit on the resulting expression.  However,
1898	     avoid propagating the trapping bit into the instructions
1899	     already speculated.  This would result in replacing the
1900	     speculative pattern with the non-speculative one and breaking
1901	     the speculation support.  */
1902	  || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to))
1903	      && VINSN_MAY_TRAP_P (EXPR_VINSN (from)))))
1904    change_vinsn_in_expr (to, EXPR_VINSN (from));
1905
1906  merge_expr_data (to, from, split_point);
1907  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1908}
1909
1910/* Clear the information of this EXPR.  */
1911void
1912clear_expr (expr_t expr)
1913{
1915  vinsn_detach (EXPR_VINSN (expr));
1916  EXPR_VINSN (expr) = NULL;
1917
1918  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1919}
1920
/* For a given LV_SET, mark EXPR as having an unavailable target register.  */
1922static void
1923set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1924{
1925  if (EXPR_SEPARABLE_P (expr))
1926    {
1927      if (REG_P (EXPR_LHS (expr))
1928          && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1929	{
1930	  /* If it's an insn like r1 = use (r1, ...), and it exists in
1931	     different forms in each of the av_sets being merged, we can't say
1932	     whether original destination register is available or not.
1933	     However, this still works if destination register is not used
1934	     in the original expression: if the branch at which LV_SET we're
1935	     looking here is not actually 'other branch' in sense that same
1936	     expression is available through it (but it can't be determined
1937	     at computation stage because of transformations on one of the
1938	     branches), it still won't affect the availability.
1939	     Liveness of a register somewhere on a code motion path means
1940	     it's either read somewhere on a codemotion path, live on
1941	     'other' branch, live at the point immediately following
1942	     the original operation, or is read by the original operation.
1943	     The latter case is filtered out in the condition below.
1944	     It still doesn't cover the case when register is defined and used
1945	     somewhere within the code motion path, and in this case we could
1946	     miss a unifying code motion along both branches using a renamed
1947	     register, but it won't affect a code correctness since upon
1948	     an actual code motion a bookkeeping code would be generated.  */
1949	  if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1950				      EXPR_LHS (expr)))
1951	    EXPR_TARGET_AVAILABLE (expr) = -1;
1952	  else
1953	    EXPR_TARGET_AVAILABLE (expr) = false;
1954	}
1955    }
1956  else
1957    {
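      /* For a non-separable expr, the target is unavailable as soon as any
         register it sets or clobbers is live in LV_SET.  */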
1958      unsigned regno;
1959      reg_set_iterator rsi;
1960
1961      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1962                                 0, regno, rsi)
1963        if (bitmap_bit_p (lv_set, regno))
1964          {
1965            EXPR_TARGET_AVAILABLE (expr) = false;
1966            break;
1967          }
1968
1969      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1970                                 0, regno, rsi)
1971        if (bitmap_bit_p (lv_set, regno))
1972          {
1973            EXPR_TARGET_AVAILABLE (expr) = false;
1974            break;
1975          }
1976    }
1977}
1978
/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed, and -1 when
   speculation is not possible.  */
1982int
1983speculate_expr (expr_t expr, ds_t ds)
1984{
1985  int res;
1986  rtx_insn *orig_insn_rtx;
1987  rtx spec_pat;
1988  ds_t target_ds, current_ds;
1989
1990  /* Obtain the status we need to put on EXPR.   */
1991  target_ds = (ds & SPECULATIVE);
1992  current_ds = EXPR_SPEC_DONE_DS (expr);
1993  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1994
1995  orig_insn_rtx = EXPR_INSN_RTX (expr);
1996
1997  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1998
1999  switch (res)
2000    {
2001    case 0:
2002      EXPR_SPEC_DONE_DS (expr) = ds;
2003      return current_ds != ds ? 1 : 0;
2004
2005    case 1:
2006      {
2007	rtx_insn *spec_insn_rtx =
2008	  create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
2009	vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
2010
2011	change_vinsn_in_expr (expr, spec_vinsn);
2012	EXPR_SPEC_DONE_DS (expr) = ds;
2013        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
2014
2015        /* Do not allow clobbering the address register of speculative
2016           insns.  */
2017        if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
2018				    expr_dest_reg (expr)))
2019          {
2020            EXPR_TARGET_AVAILABLE (expr) = false;
2021            return 2;
2022          }
2023
2024	return 1;
2025      }
2026
2027    case -1:
2028      return -1;
2029
2030    default:
2031      gcc_unreachable ();
2032      return -1;
2033    }
2034}
2035
2036/* Return a destination register, if any, of EXPR.  */
2037rtx
2038expr_dest_reg (expr_t expr)
2039{
2040  rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2041
2042  if (dest != NULL_RTX && REG_P (dest))
2043    return dest;
2044
2045  return NULL_RTX;
2046}
2047
/* Return the REGNO of EXPR's destination.  */
2049unsigned
2050expr_dest_regno (expr_t expr)
2051{
2052  rtx dest = expr_dest_reg (expr);
2053
2054  gcc_assert (dest != NULL_RTX);
2055  return REGNO (dest);
2056}
2057
/* For a given LV_SET, mark all expressions that are in JOIN_SET but not in
   AV_SET as having an unavailable target register.  */
2060void
2061mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2062{
2063  expr_t expr;
2064  av_set_iterator avi;
2065
2066  FOR_EACH_EXPR (expr, avi, join_set)
2067    if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2068      set_unavailable_target_for_expr (expr, lv_set);
2069}
2070
2071
2072/* Returns true if REG (at least partially) is present in REGS.  */
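/* E.g., for a multi-word hard register occupying regnos 4 and 5, this
   returns true if either regno is in REGS.  */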
2073bool
2074register_unavailable_p (regset regs, rtx reg)
2075{
2076  unsigned regno, end_regno;
2077
2078  regno = REGNO (reg);
2079  if (bitmap_bit_p (regs, regno))
2080    return true;
2081
2082  end_regno = END_REGNO (reg);
2083
2084  while (++regno < end_regno)
2085    if (bitmap_bit_p (regs, regno))
2086      return true;
2087
2088  return false;
2089}
2090
2091/* Av set functions.  */
2092
2093/* Add a new element to av set SETP.
2094   Return the element added.  */
2095static av_set_t
2096av_set_add_element (av_set_t *setp)
2097{
2098  /* Insert at the beginning of the list.  */
2099  _list_add (setp);
2100  return *setp;
2101}
2102
2103/* Add EXPR to SETP.  */
2104void
2105av_set_add (av_set_t *setp, expr_t expr)
2106{
2107  av_set_t elem;
2108
2109  gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2110  elem = av_set_add_element (setp);
2111  copy_expr (_AV_SET_EXPR (elem), expr);
2112}
2113
2114/* Same, but do not copy EXPR.  */
2115static void
2116av_set_add_nocopy (av_set_t *setp, expr_t expr)
2117{
2118  av_set_t elem;
2119
2120  elem = av_set_add_element (setp);
2121  *_AV_SET_EXPR (elem) = *expr;
2122}
2123
2124/* Remove expr pointed to by IP from the av_set.  */
2125void
2126av_set_iter_remove (av_set_iterator *ip)
2127{
2128  clear_expr (_AV_SET_EXPR (*ip->lp));
2129  _list_iter_remove (ip);
2130}
2131
/* Search for an expr in SET that is equivalent to SOUGHT_VINSN in the
   sense of vinsn_equal_p.  Return NULL if no such expr was found.  */
2135expr_t
2136av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2137{
2138  expr_t expr;
2139  av_set_iterator i;
2140
2141  FOR_EACH_EXPR (expr, i, set)
2142    if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2143      return expr;
2144  return NULL;
2145}
2146
2147/* Same, but also remove the EXPR found.   */
2148static expr_t
2149av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2150{
2151  expr_t expr;
2152  av_set_iterator i;
2153
2154  FOR_EACH_EXPR_1 (expr, i, setp)
2155    if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2156      {
2157        _list_iter_remove_nofree (&i);
2158        return expr;
2159      }
2160  return NULL;
2161}
2162
/* Search for an expr in SET whose vinsn is equivalent to EXPR's vinsn in
   the sense of vinsn_equal_p, but which is not EXPR itself.
   Return NULL if no such expr was found.  */
2166static expr_t
2167av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2168{
2169  expr_t cur_expr;
2170  av_set_iterator i;
2171
2172  FOR_EACH_EXPR (cur_expr, i, set)
2173    {
2174      if (cur_expr == expr)
2175        continue;
2176      if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2177        return cur_expr;
2178    }
2179
2180  return NULL;
2181}
2182
/* If another expression equivalent to EXPR is already in *AVP, merge EXPR
   into it, remove the element pointed to by IP, and return the remaining
   expr.  Otherwise return EXPR.  */
2184expr_t
2185merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2186{
2187  expr_t expr2;
2188
2189  expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2190  if (expr2 != NULL)
2191    {
      /* Reset target availability on merge, since taking it from only one
         of the exprs would be misleading when they come from different code.  */
2194      EXPR_TARGET_AVAILABLE (expr2) = -1;
2195      EXPR_USEFULNESS (expr2) = 0;
2196
2197      merge_expr (expr2, expr, NULL);
2198
      /* Fix usefulness, as it should now be REG_BR_PROB_BASE.  */
2200      EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2201
2202      av_set_iter_remove (ip);
2203      return expr2;
2204    }
2205
2206  return expr;
2207}
2208
2209/* Return true if there is an expr that correlates to VI in SET.  */
2210bool
2211av_set_is_in_p (av_set_t set, vinsn_t vi)
2212{
2213  return av_set_lookup (set, vi) != NULL;
2214}
2215
2216/* Return a copy of SET.  */
2217av_set_t
2218av_set_copy (av_set_t set)
2219{
2220  expr_t expr;
2221  av_set_iterator i;
2222  av_set_t res = NULL;
2223
2224  FOR_EACH_EXPR (expr, i, set)
2225    av_set_add (&res, expr);
2226
2227  return res;
2228}
2229
2230/* Join two av sets that do not have common elements by attaching second set
2231   (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2232   _AV_SET_NEXT of first set's last element).  */
2233static void
2234join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2235{
2236  gcc_assert (*to_tailp == NULL);
2237  *to_tailp = *fromp;
2238  *fromp = NULL;
2239}
2240
/* Make the set pointed to by TOP be the union of TOP and FROMP.  Clear the
   av_set pointed to by FROMP afterwards.  */
2243void
2244av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2245{
2246  expr_t expr1;
2247  av_set_iterator i;
2248
  /* Delete from TOP all exprs that are present in FROMP.  */
2250  FOR_EACH_EXPR_1 (expr1, i, top)
2251    {
2252      expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2253
2254      if (expr2)
2255	{
2256          merge_expr (expr2, expr1, insn);
2257	  av_set_iter_remove (&i);
2258	}
2259    }
2260
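  /* After the loop I.LP points at the _AV_SET_NEXT field of TOP's last
     element, so the remaining exprs of FROMP are appended there.  */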
2261  join_distinct_sets (i.lp, fromp);
2262}
2263
2264/* Same as above, but also update availability of target register in
2265   TOP judging by TO_LV_SET and FROM_LV_SET.  */
2266void
2267av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2268                       regset from_lv_set, insn_t insn)
2269{
2270  expr_t expr1;
2271  av_set_iterator i;
2272  av_set_t *to_tailp, in_both_set = NULL;
2273
  /* Delete from TOP all exprs that are present in FROMP.  */
2275  FOR_EACH_EXPR_1 (expr1, i, top)
2276    {
2277      expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2278
2279      if (expr2)
2280	{
2281          /* It may be that the expressions have different destination
2282             registers, in which case we need to check liveness here.  */
2283          if (EXPR_SEPARABLE_P (expr1))
2284            {
2285              int regno1 = (REG_P (EXPR_LHS (expr1))
2286                            ? (int) expr_dest_regno (expr1) : -1);
2287              int regno2 = (REG_P (EXPR_LHS (expr2))
2288                            ? (int) expr_dest_regno (expr2) : -1);
2289
              /* ??? We don't have a way to check restrictions for the
                 *other* register on the current path; we only did it
                 for the current target register.  Give up.  */
2293              if (regno1 != regno2)
2294                EXPR_TARGET_AVAILABLE (expr2) = -1;
2295            }
2296          else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2297            EXPR_TARGET_AVAILABLE (expr2) = -1;
2298
2299          merge_expr (expr2, expr1, insn);
2300          av_set_add_nocopy (&in_both_set, expr2);
2301	  av_set_iter_remove (&i);
2302	}
2303      else
2304        /* EXPR1 is present in TOP, but not in FROMP.  Check it on
2305           FROM_LV_SET.  */
2306        set_unavailable_target_for_expr (expr1, from_lv_set);
2307    }
2308  to_tailp = i.lp;
2309
2310  /* These expressions are not present in TOP.  Check liveness
2311     restrictions on TO_LV_SET.  */
2312  FOR_EACH_EXPR (expr1, i, *fromp)
2313    set_unavailable_target_for_expr (expr1, to_lv_set);
2314
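  /* Append the merged exprs found in both sets to the tail of FROMP first,
     then attach the whole of FROMP to the tail of TOP.  */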
2315  join_distinct_sets (i.lp, &in_both_set);
2316  join_distinct_sets (to_tailp, fromp);
2317}
2318
2319/* Clear av_set pointed to by SETP.  */
2320void
2321av_set_clear (av_set_t *setp)
2322{
2323  expr_t expr;
2324  av_set_iterator i;
2325
2326  FOR_EACH_EXPR_1 (expr, i, setp)
2327    av_set_iter_remove (&i);
2328
2329  gcc_assert (*setp == NULL);
2330}
2331
/* Leave only one non-speculative element in *SETP.  */
2333void
2334av_set_leave_one_nonspec (av_set_t *setp)
2335{
2336  expr_t expr;
2337  av_set_iterator i;
2338  bool has_one_nonspec = false;
2339
2340  /* Keep all speculative exprs, and leave one non-speculative
2341     (the first one).  */
2342  FOR_EACH_EXPR_1 (expr, i, setp)
2343    {
2344      if (!EXPR_SPEC_DONE_DS (expr))
2345	{
2346  	  if (has_one_nonspec)
2347	    av_set_iter_remove (&i);
2348	  else
2349	    has_one_nonspec = true;
2350	}
2351    }
2352}
2353
2354/* Return the N'th element of the SET.  */
2355expr_t
2356av_set_element (av_set_t set, int n)
2357{
2358  expr_t expr;
2359  av_set_iterator i;
2360
2361  FOR_EACH_EXPR (expr, i, set)
2362    if (n-- == 0)
2363      return expr;
2364
2365  gcc_unreachable ();
2366  return NULL;
2367}
2368
2369/* Deletes all expressions from AVP that are conditional branches (IFs).  */
2370void
2371av_set_substract_cond_branches (av_set_t *avp)
2372{
2373  av_set_iterator i;
2374  expr_t expr;
2375
2376  FOR_EACH_EXPR_1 (expr, i, avp)
2377    if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2378      av_set_iter_remove (&i);
2379}
2380
/* Multiply the usefulness attribute of each member of av-set AV by
   PROB / ALL_PROB.  */
2383void
2384av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2385{
2386  av_set_iterator i;
2387  expr_t expr;
2388
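  /* E.g., an expr with usefulness REG_BR_PROB_BASE on a path with
     probability PROB / ALL_PROB == 1/2 ends up with usefulness
     REG_BR_PROB_BASE / 2.  */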
2389  FOR_EACH_EXPR (expr, i, av)
2390    EXPR_USEFULNESS (expr) = (all_prob
2391                              ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2392                              : 0);
2393}
2394
/* Leave in AVP only those expressions that are present in AV,
   merging their history of transformations.  */
2397void
2398av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2399{
2400  av_set_iterator i;
2401  expr_t expr, expr2;
2402
2403  FOR_EACH_EXPR_1 (expr, i, avp)
2404    if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2405      av_set_iter_remove (&i);
2406    else
2407      /* When updating av sets in bookkeeping blocks, we can add more insns
2408	 there which will be transformed but the upper av sets will not
2409	 reflect those transformations.  We then fail to undo those
2410	 when searching for such insns.  So merge the history saved
2411	 in the av set of the block we are processing.  */
2412      merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2413			  EXPR_HISTORY_OF_CHANGES (expr2));
2414}
2415
2416
2417
2418/* Dependence hooks to initialize insn data.  */
2419
2420/* This is used in hooks callable from dependence analysis when initializing
   an instruction's data.  */
2422static struct
2423{
2424  /* Where the dependence was found (lhs/rhs).  */
2425  deps_where_t where;
2426
2427  /* The actual data object to initialize.  */
2428  idata_t id;
2429
2430  /* True when the insn should not be made clonable.  */
2431  bool force_unique_p;
2432
2433  /* True when insn should be treated as of type USE, i.e. never renamed.  */
2434  bool force_use_p;
2435} deps_init_id_data;
2436
2437
2438/* Setup ID for INSN.  FORCE_UNIQUE_P is true when INSN should not be
2439   clonable.  */
2440static void
2441setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2442{
2443  int type;
2444
  /* Determine whether INSN could be cloned and return the appropriate vinsn
     type.  Those clonable insns that can be separated into lhs and rhs have
     type SET.  Other clonable insns have type USE.  */
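  /* E.g., a simple register move gets type SET, a simplejump gets PC, and an
     insn forced to be unique (such as an asm) keeps its original rtx code.  */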
2448  type = GET_CODE (insn);
2449
  /* Only regular insns can be cloned.  */
2451  if (type == INSN && !force_unique_p)
2452    type = SET;
2453  else if (type == JUMP_INSN && simplejump_p (insn))
2454    type = PC;
2455  else if (type == DEBUG_INSN)
2456    type = !force_unique_p ? USE : INSN;
2457
2458  IDATA_TYPE (id) = type;
2459  IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2460  IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2461  IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2462}
2463
2464/* Start initializing insn data.  */
2465static void
2466deps_init_id_start_insn (insn_t insn)
2467{
2468  gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2469
2470  setup_id_for_insn (deps_init_id_data.id, insn,
2471                     deps_init_id_data.force_unique_p);
2472  deps_init_id_data.where = DEPS_IN_INSN;
2473}
2474
2475/* Start initializing lhs data.  */
2476static void
2477deps_init_id_start_lhs (rtx lhs)
2478{
2479  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2480  gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2481
2482  if (IDATA_TYPE (deps_init_id_data.id) == SET)
2483    {
2484      IDATA_LHS (deps_init_id_data.id) = lhs;
2485      deps_init_id_data.where = DEPS_IN_LHS;
2486    }
2487}
2488
2489/* Finish initializing lhs data.  */
2490static void
2491deps_init_id_finish_lhs (void)
2492{
2493  deps_init_id_data.where = DEPS_IN_INSN;
2494}
2495
2496/* Note a set of REGNO.  */
2497static void
2498deps_init_id_note_reg_set (int regno)
2499{
2500  haifa_note_reg_set (regno);
2501
2502  if (deps_init_id_data.where == DEPS_IN_RHS)
2503    deps_init_id_data.force_use_p = true;
2504
2505  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2506    SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2507
2508#ifdef STACK_REGS
  /* Make instructions that set stack registers ineligible for
2510     renaming to avoid issues with find_used_regs.  */
2511  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2512    deps_init_id_data.force_use_p = true;
2513#endif
2514}
2515
2516/* Note a clobber of REGNO.  */
2517static void
2518deps_init_id_note_reg_clobber (int regno)
2519{
2520  haifa_note_reg_clobber (regno);
2521
2522  if (deps_init_id_data.where == DEPS_IN_RHS)
2523    deps_init_id_data.force_use_p = true;
2524
2525  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2526    SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2527}
2528
2529/* Note a use of REGNO.  */
2530static void
2531deps_init_id_note_reg_use (int regno)
2532{
2533  haifa_note_reg_use (regno);
2534
2535  if (IDATA_TYPE (deps_init_id_data.id) != PC)
2536    SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2537}
2538
2539/* Start initializing rhs data.  */
2540static void
2541deps_init_id_start_rhs (rtx rhs)
2542{
2543  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2544
2545  /* And there was no sel_deps_reset_to_insn ().  */
2546  if (IDATA_LHS (deps_init_id_data.id) != NULL)
2547    {
2548      IDATA_RHS (deps_init_id_data.id) = rhs;
2549      deps_init_id_data.where = DEPS_IN_RHS;
2550    }
2551}
2552
2553/* Finish initializing rhs data.  */
2554static void
2555deps_init_id_finish_rhs (void)
2556{
2557  gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2558	      || deps_init_id_data.where == DEPS_IN_INSN);
2559  deps_init_id_data.where = DEPS_IN_INSN;
2560}
2561
2562/* Finish initializing insn data.  */
2563static void
2564deps_init_id_finish_insn (void)
2565{
2566  gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2567
2568  if (IDATA_TYPE (deps_init_id_data.id) == SET)
2569    {
2570      rtx lhs = IDATA_LHS (deps_init_id_data.id);
2571      rtx rhs = IDATA_RHS (deps_init_id_data.id);
2572
2573      if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2574	  || deps_init_id_data.force_use_p)
2575	{
2576          /* This should be a USE, as we don't want to schedule its RHS
2577             separately.  However, we still want to have them recorded
2578             for the purposes of substitution.  That's why we don't
2579             simply call downgrade_to_use () here.  */
2580	  gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2581	  gcc_assert (!lhs == !rhs);
2582
2583	  IDATA_TYPE (deps_init_id_data.id) = USE;
2584	}
2585    }
2586
2587  deps_init_id_data.where = DEPS_IN_NOWHERE;
2588}
2589
2590/* This is dependence info used for initializing insn's data.  */
2591static struct sched_deps_info_def deps_init_id_sched_deps_info;
2592
2593/* This initializes most of the static part of the above structure.  */
2594static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2595  {
2596    NULL,
2597
2598    deps_init_id_start_insn,
2599    deps_init_id_finish_insn,
2600    deps_init_id_start_lhs,
2601    deps_init_id_finish_lhs,
2602    deps_init_id_start_rhs,
2603    deps_init_id_finish_rhs,
2604    deps_init_id_note_reg_set,
2605    deps_init_id_note_reg_clobber,
2606    deps_init_id_note_reg_use,
2607    NULL, /* note_mem_dep */
2608    NULL, /* note_dep */
2609
2610    0, /* use_cselib */
2611    0, /* use_deps_list */
2612    0 /* generate_spec_deps */
2613  };
2614
2615/* Initialize INSN's lhs and rhs in ID.  When FORCE_UNIQUE_P is true,
2616   we don't actually need information about lhs and rhs.  */
2617static void
2618setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2619{
2620  rtx pat = PATTERN (insn);
2621
2622  if (NONJUMP_INSN_P (insn)
2623      && GET_CODE (pat) == SET
2624      && !force_unique_p)
2625    {
2626      IDATA_RHS (id) = SET_SRC (pat);
2627      IDATA_LHS (id) = SET_DEST (pat);
2628    }
2629  else
2630    IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2631}
2632
2633/* Possibly downgrade INSN to USE.  */
2634static void
2635maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2636{
2637  bool must_be_use = false;
2638  df_ref def;
2639  rtx lhs = IDATA_LHS (id);
2640  rtx rhs = IDATA_RHS (id);
2641
2642  /* We downgrade only SETs.  */
2643  if (IDATA_TYPE (id) != SET)
2644    return;
2645
2646  if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2647    {
2648      IDATA_TYPE (id) = USE;
2649      return;
2650    }
2651
2652  FOR_EACH_INSN_DEF (def, insn)
2653    {
2654      if (DF_REF_INSN (def)
2655          && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2656          && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2657        {
2658          must_be_use = true;
2659          break;
2660        }
2661
2662#ifdef STACK_REGS
      /* Make instructions that set stack registers ineligible for
2664	 renaming to avoid issues with find_used_regs.  */
2665      if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2666	{
2667	  must_be_use = true;
2668	  break;
2669	}
2670#endif
2671    }
2672
2673  if (must_be_use)
2674    IDATA_TYPE (id) = USE;
2675}
2676
2677/* Setup implicit register clobbers calculated by sched-deps for INSN
2678   before reload and save them in ID.  */
2679static void
2680setup_id_implicit_regs (idata_t id, insn_t insn)
2681{
2682  if (reload_completed)
2683    return;
2684
2685  HARD_REG_SET temp;
2686  unsigned regno;
2687  hard_reg_set_iterator hrsi;
2688
2689  get_implicit_reg_pending_clobbers (&temp, insn);
2690  EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2691    SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2692}
2693
2694/* Setup register sets describing INSN in ID.  */
2695static void
2696setup_id_reg_sets (idata_t id, insn_t insn)
2697{
2698  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2699  df_ref def, use;
2700  regset tmp = get_clear_regset_from_pool ();
2701
2702  FOR_EACH_INSN_INFO_DEF (def, insn_info)
2703    {
2704      unsigned int regno = DF_REF_REGNO (def);
2705
2706      /* Post modifies are treated like clobbers by sched-deps.c.  */
2707      if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2708                                     | DF_REF_PRE_POST_MODIFY)))
2709        SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2710      else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2711        {
2712	  SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2713
2714#ifdef STACK_REGS
2715	  /* For stack registers, treat writes to them as writes
2716	     to the first one to be consistent with sched-deps.c.  */
2717	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2718	    SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2719#endif
2720	}
2721      /* Mark special refs that generate read/write def pair.  */
2722      if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2723          || regno == STACK_POINTER_REGNUM)
2724        bitmap_set_bit (tmp, regno);
2725    }
2726
2727  FOR_EACH_INSN_INFO_USE (use, insn_info)
2728    {
2729      unsigned int regno = DF_REF_REGNO (use);
2730
2731      /* When these refs are met for the first time, skip them, as
2732         these uses are just counterparts of some defs.  */
2733      if (bitmap_bit_p (tmp, regno))
2734        bitmap_clear_bit (tmp, regno);
2735      else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2736	{
2737	  SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2738
2739#ifdef STACK_REGS
2740	  /* For stack registers, treat reads from them as reads from
2741	     the first one to be consistent with sched-deps.c.  */
2742	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2743	    SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2744#endif
2745	}
2746    }
2747
2748  /* Also get implicit reg clobbers from sched-deps.  */
2749  setup_id_implicit_regs (id, insn);
2750
2751  return_regset_to_pool (tmp);
2752}
2753
2754/* Initialize instruction data for INSN in ID using DF's data.  */
2755static void
2756init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2757{
2758  gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2759
2760  setup_id_for_insn (id, insn, force_unique_p);
2761  setup_id_lhs_rhs (id, insn, force_unique_p);
2762
2763  if (INSN_NOP_P (insn))
2764    return;
2765
2766  maybe_downgrade_id_to_use (id, insn);
2767  setup_id_reg_sets (id, insn);
2768}
2769
2770/* Initialize instruction data for INSN in ID.  */
2771static void
2772deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2773{
2774  struct deps_desc _dc, *dc = &_dc;
2775
2776  deps_init_id_data.where = DEPS_IN_NOWHERE;
2777  deps_init_id_data.id = id;
2778  deps_init_id_data.force_unique_p = force_unique_p;
2779  deps_init_id_data.force_use_p = false;
2780
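  /* Analyze INSN against a scratch dependence context; the deps_init_id_*
     hooks installed below fill ID as they are called back.  */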
2781  init_deps (dc, false);
2782  memcpy (&deps_init_id_sched_deps_info,
2783	  &const_deps_init_id_sched_deps_info,
2784	  sizeof (deps_init_id_sched_deps_info));
2785  if (spec_info != NULL)
2786    deps_init_id_sched_deps_info.generate_spec_deps = 1;
2787  sched_deps_info = &deps_init_id_sched_deps_info;
2788
2789  deps_analyze_insn (dc, insn);
  /* Implicit reg clobbers are received from sched-deps separately.  */
2791  setup_id_implicit_regs (id, insn);
2792
2793  free_deps (dc);
2794  deps_init_id_data.id = NULL;
2795}
2796
2797
2798struct sched_scan_info_def
2799{
  /* This hook notifies the scheduler frontend to extend its internal per
     basic block data structures.  This hook should be called once before a
     series of calls to bb_init ().  */
2803  void (*extend_bb) (void);
2804
  /* This hook makes the scheduler frontend initialize its internal data
2806     structures for the passed basic block.  */
2807  void (*init_bb) (basic_block);
2808
  /* This hook notifies the scheduler frontend to extend its internal per insn
     data structures.  This hook should be called once before a series of
     calls to insn_init ().  */
2812  void (*extend_insn) (void);
2813
  /* This hook makes the scheduler frontend initialize its internal data
2815     structures for the passed insn.  */
2816  void (*init_insn) (insn_t);
2817};
2818
2819/* A driver function to add a set of basic blocks (BBS) to the
2820   scheduling region.  */
2821static void
2822sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2823{
2824  unsigned i;
2825  basic_block bb;
2826
2827  if (ssi->extend_bb)
2828    ssi->extend_bb ();
2829
2830  if (ssi->init_bb)
2831    FOR_EACH_VEC_ELT (bbs, i, bb)
2832      ssi->init_bb (bb);
2833
2834  if (ssi->extend_insn)
2835    ssi->extend_insn ();
2836
2837  if (ssi->init_insn)
2838    FOR_EACH_VEC_ELT (bbs, i, bb)
2839      {
2840	rtx_insn *insn;
2841
2842	FOR_BB_INSNS (bb, insn)
2843	  ssi->init_insn (insn);
2844      }
2845}
2846
2847/* Implement hooks for collecting fundamental insn properties like if insn is
2848   an ASM or is within a SCHED_GROUP.  */
2849
/* True when the "one-time init" data for INSN has already been
   initialized.  */
2851static bool
2852first_time_insn_init (insn_t insn)
2853{
2854  return INSN_LIVE (insn) == NULL;
2855}
2856
2857/* Hash an entry in a transformed_insns hashtable.  */
2858static hashval_t
2859hash_transformed_insns (const void *p)
2860{
2861  return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2862}
2863
2864/* Compare the entries in a transformed_insns hashtable.  */
2865static int
2866eq_transformed_insns (const void *p, const void *q)
2867{
2868  rtx_insn *i1 =
2869    VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2870  rtx_insn *i2 =
2871    VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2872
2873  if (INSN_UID (i1) == INSN_UID (i2))
2874    return 1;
2875  return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2876}
2877
2878/* Free an entry in a transformed_insns hashtable.  */
2879static void
2880free_transformed_insns (void *p)
2881{
2882  struct transformed_insns *pti = (struct transformed_insns *) p;
2883
2884  vinsn_detach (pti->vinsn_old);
2885  vinsn_detach (pti->vinsn_new);
2886  free (pti);
2887}
2888
2889/* Init the s_i_d data for INSN which should be inited just once, when
2890   we first see the insn.  */
2891static void
2892init_first_time_insn_data (insn_t insn)
2893{
2894  /* This should not be set if this is the first time we init data for
2895     insn.  */
2896  gcc_assert (first_time_insn_init (insn));
2897
2898  /* These are needed for nops too.  */
2899  INSN_LIVE (insn) = get_regset_from_pool ();
2900  INSN_LIVE_VALID_P (insn) = false;
2901
2902  if (!INSN_NOP_P (insn))
2903    {
2904      INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2905      INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2906      INSN_TRANSFORMED_INSNS (insn)
2907        = htab_create (16, hash_transformed_insns,
2908                       eq_transformed_insns, free_transformed_insns);
2909      init_deps (&INSN_DEPS_CONTEXT (insn), true);
2910    }
2911}
2912
2913/* Free almost all above data for INSN that is scheduled already.
2914   Used for extra-large basic blocks.  */
2915void
2916free_data_for_scheduled_insn (insn_t insn)
2917{
2918  gcc_assert (! first_time_insn_init (insn));
2919
2920  if (! INSN_ANALYZED_DEPS (insn))
2921    return;
2922
2923  BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2924  BITMAP_FREE (INSN_FOUND_DEPS (insn));
2925  htab_delete (INSN_TRANSFORMED_INSNS (insn));
2926
2927  /* This is allocated only for bookkeeping insns.  */
2928  if (INSN_ORIGINATORS (insn))
2929    BITMAP_FREE (INSN_ORIGINATORS (insn));
2930  free_deps (&INSN_DEPS_CONTEXT (insn));
2931
2932  INSN_ANALYZED_DEPS (insn) = NULL;
2933
2934  /* Clear the readonly flag so we would ICE when trying to recalculate
2935     the deps context (as we believe that it should not happen).  */
2936  (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2937}
2938
2939/* Free the same data as above for INSN.  */
2940static void
2941free_first_time_insn_data (insn_t insn)
2942{
2943  gcc_assert (! first_time_insn_init (insn));
2944
2945  free_data_for_scheduled_insn (insn);
2946  return_regset_to_pool (INSN_LIVE (insn));
2947  INSN_LIVE (insn) = NULL;
2948  INSN_LIVE_VALID_P (insn) = false;
2949}
2950
2951/* Initialize region-scope data structures for basic blocks.  */
2952static void
2953init_global_and_expr_for_bb (basic_block bb)
2954{
2955  if (sel_bb_empty_p (bb))
2956    return;
2957
2958  invalidate_av_set (bb);
2959}
2960
2961/* Data for global dependency analysis (to initialize CANT_MOVE and
2962   SCHED_GROUP_P).  */
2963static struct
2964{
2965  /* Previous insn.  */
2966  insn_t prev_insn;
2967} init_global_data;
2968
2969/* Determine if INSN is in the sched_group, is an asm or should not be
2970   cloned.  After that initialize its expr.  */
2971static void
2972init_global_and_expr_for_insn (insn_t insn)
2973{
2974  if (LABEL_P (insn))
2975    return;
2976
2977  if (NOTE_INSN_BASIC_BLOCK_P (insn))
2978    {
2979      init_global_data.prev_insn = NULL;
2980      return;
2981    }
2982
2983  gcc_assert (INSN_P (insn));
2984
2985  if (SCHED_GROUP_P (insn))
2986    /* Setup a sched_group.  */
2987    {
2988      insn_t prev_insn = init_global_data.prev_insn;
2989
2990      if (prev_insn)
2991	INSN_SCHED_NEXT (prev_insn) = insn;
2992
2993      init_global_data.prev_insn = insn;
2994    }
2995  else
2996    init_global_data.prev_insn = NULL;
2997
2998  if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2999      || asm_noperands (PATTERN (insn)) >= 0)
3000    /* Mark INSN as an asm.  */
3001    INSN_ASM_P (insn) = true;
3002
3003  {
3004    bool force_unique_p;
3005    ds_t spec_done_ds;
3006
3007    /* Certain instructions cannot be cloned, and frame related insns and
3008       the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
3009       their block.  */
3010    if (prologue_epilogue_contains (insn))
3011      {
3012        if (RTX_FRAME_RELATED_P (insn))
3013          CANT_MOVE (insn) = 1;
3014        else
3015          {
3016            rtx note;
3017            for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3018              if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
3019                  && ((enum insn_note) INTVAL (XEXP (note, 0))
3020                      == NOTE_INSN_EPILOGUE_BEG))
3021                {
3022                  CANT_MOVE (insn) = 1;
3023                  break;
3024                }
3025          }
3026        force_unique_p = true;
3027      }
3028    else
3029      if (CANT_MOVE (insn)
3030          || INSN_ASM_P (insn)
3031          || SCHED_GROUP_P (insn)
3032	  || CALL_P (insn)
3033          /* Exception handling insns are always unique.  */
3034          || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
          /* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
3036          || control_flow_insn_p (insn)
3037          || volatile_insn_p (PATTERN (insn))
3038          || (targetm.cannot_copy_insn_p
3039              && targetm.cannot_copy_insn_p (insn)))
3040        force_unique_p = true;
3041      else
3042        force_unique_p = false;
3043
3044    if (targetm.sched.get_insn_spec_ds)
3045      {
3046	spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3047	spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3048      }
3049    else
3050      spec_done_ds = 0;
3051
3052    /* Initialize INSN's expr.  */
3053    init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3054	       REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3055	       spec_done_ds, 0, 0, vNULL, true,
3056	       false, false, false, CANT_MOVE (insn));
3057  }
3058
3059  init_first_time_insn_data (insn);
3060}
3061
3062/* Scan the region and initialize instruction data for basic blocks BBS.  */
3063void
3064sel_init_global_and_expr (bb_vec_t bbs)
3065{
3066  /* ??? It would be nice to implement push / pop scheme for sched_infos.  */
3067  const struct sched_scan_info_def ssi =
3068    {
3069      NULL, /* extend_bb */
3070      init_global_and_expr_for_bb, /* init_bb */
3071      extend_insn_data, /* extend_insn */
3072      init_global_and_expr_for_insn /* init_insn */
3073    };
3074
3075  sched_scan (&ssi, bbs);
3076}
3077
3078/* Finalize region-scope data structures for basic blocks.  */
3079static void
3080finish_global_and_expr_for_bb (basic_block bb)
3081{
3082  av_set_clear (&BB_AV_SET (bb));
3083  BB_AV_LEVEL (bb) = 0;
3084}
3085
3086/* Finalize INSN's data.  */
3087static void
3088finish_global_and_expr_insn (insn_t insn)
3089{
3090  if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3091    return;
3092
3093  gcc_assert (INSN_P (insn));
3094
3095  if (INSN_LUID (insn) > 0)
3096    {
3097      free_first_time_insn_data (insn);
3098      INSN_WS_LEVEL (insn) = 0;
3099      CANT_MOVE (insn) = 0;
3100
3101      /* We can no longer assert this, as vinsns of this insn could be
         easily live in other insns' caches.  This should be changed to
3103         a counter-like approach among all vinsns.  */
3104      gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3105      clear_expr (INSN_EXPR (insn));
3106    }
3107}
3108
3109/* Finalize per instruction data for the whole region.  */
3110void
3111sel_finish_global_and_expr (void)
3112{
3113  {
3114    bb_vec_t bbs;
3115    int i;
3116
3117    bbs.create (current_nr_blocks);
3118
3119    for (i = 0; i < current_nr_blocks; i++)
3120      bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
3121
3122    /* Clear AV_SETs and INSN_EXPRs.  */
3123    {
3124      const struct sched_scan_info_def ssi =
3125	{
3126	  NULL, /* extend_bb */
3127	  finish_global_and_expr_for_bb, /* init_bb */
3128	  NULL, /* extend_insn */
3129	  finish_global_and_expr_insn /* init_insn */
3130	};
3131
3132      sched_scan (&ssi, bbs);
3133    }
3134
3135    bbs.release ();
3136  }
3137
3138  finish_insns ();
3139}
3140
3141
/* In the hooks below, we merely calculate whether or not a dependence
   exists, and in what part of the insn.  However, we will need more data
   when we start caching dependence requests.  */
3145
3146/* Container to hold information for dependency analysis.  */
3147static struct
3148{
3149  deps_t dc;
3150
3151  /* A variable to track which part of rtx we are scanning in
3152     sched-deps.c: sched_analyze_insn ().  */
3153  deps_where_t where;
3154
3155  /* Current producer.  */
3156  insn_t pro;
3157
3158  /* Current consumer.  */
3159  vinsn_t con;
3160
  /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3162     X is from { INSN, LHS, RHS }.  */
3163  ds_t has_dep_p[DEPS_IN_NOWHERE];
3164} has_dependence_data;
3165
3166/* Start analyzing dependencies of INSN.  */
3167static void
3168has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3169{
3170  gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3171
3172  has_dependence_data.where = DEPS_IN_INSN;
3173}
3174
3175/* Finish analyzing dependencies of an insn.  */
3176static void
3177has_dependence_finish_insn (void)
3178{
3179  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3180
3181  has_dependence_data.where = DEPS_IN_NOWHERE;
3182}
3183
3184/* Start analyzing dependencies of LHS.  */
3185static void
3186has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3187{
3188  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3189
3190  if (VINSN_LHS (has_dependence_data.con) != NULL)
3191    has_dependence_data.where = DEPS_IN_LHS;
3192}
3193
3194/* Finish analyzing dependencies of an lhs.  */
3195static void
3196has_dependence_finish_lhs (void)
3197{
3198  has_dependence_data.where = DEPS_IN_INSN;
3199}
3200
3201/* Start analyzing dependencies of RHS.  */
3202static void
3203has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3204{
3205  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3206
3207  if (VINSN_RHS (has_dependence_data.con) != NULL)
3208    has_dependence_data.where = DEPS_IN_RHS;
3209}
3210
/* Finish analyzing dependencies of an rhs.  */
3212static void
3213has_dependence_finish_rhs (void)
3214{
3215  gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3216	      || has_dependence_data.where == DEPS_IN_INSN);
3217
3218  has_dependence_data.where = DEPS_IN_INSN;
3219}
3220
3221/* Note a set of REGNO.  */
3222static void
3223has_dependence_note_reg_set (int regno)
3224{
3225  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3226
3227  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3228				       VINSN_INSN_RTX
3229				       (has_dependence_data.con)))
3230    {
3231      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3232
3233      if (reg_last->sets != NULL
3234	  || reg_last->clobbers != NULL)
3235	*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3236
3237      if (reg_last->uses || reg_last->implicit_sets)
3238	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3239    }
3240}
3241
3242/* Note a clobber of REGNO.  */
3243static void
3244has_dependence_note_reg_clobber (int regno)
3245{
3246  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3247
3248  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3249				       VINSN_INSN_RTX
3250				       (has_dependence_data.con)))
3251    {
3252      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3253
3254      if (reg_last->sets)
3255	*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3256
3257      if (reg_last->uses || reg_last->implicit_sets)
3258	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3259    }
3260}
3261
3262/* Note a use of REGNO.  */
3263static void
3264has_dependence_note_reg_use (int regno)
3265{
3266  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3267
3268  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3269				       VINSN_INSN_RTX
3270				       (has_dependence_data.con)))
3271    {
3272      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3273
3274      if (reg_last->sets)
3275	*dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3276
3277      if (reg_last->clobbers || reg_last->implicit_sets)
3278	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3279
3280      /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3281	 is actually a check insn.  We need to do this for any register
3282	 read-read dependency with the check unless we track properly
3283	 all registers written by BE_IN_SPEC-speculated insns, as
3284	 we don't have explicit dependence lists.  See PR 53975.  */
3285      if (reg_last->uses)
3286	{
3287	  ds_t pro_spec_checked_ds;
3288
3289	  pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3290	  pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3291
3292	  if (pro_spec_checked_ds != 0)
3293	    *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3294				  NULL_RTX, NULL_RTX);
3295	}
3296    }
3297}
3298
3299/* Note a memory dependence.  */
3300static void
3301has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3302			     rtx pending_mem ATTRIBUTE_UNUSED,
3303			     insn_t pending_insn ATTRIBUTE_UNUSED,
3304			     ds_t ds ATTRIBUTE_UNUSED)
3305{
3306  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3307				       VINSN_INSN_RTX (has_dependence_data.con)))
3308    {
3309      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3310
3311      *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3312    }
3313}
3314
3315/* Note a dependence.  */
3316static void
3317has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3318			 ds_t ds ATTRIBUTE_UNUSED)
3319{
3320  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3321				       VINSN_INSN_RTX (has_dependence_data.con)))
3322    {
3323      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3324
3325      *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3326    }
3327}
3328
3329/* Mark the insn as having a hard dependence that prevents speculation.  */
3330void
3331sel_mark_hard_insn (rtx insn)
3332{
3333  int i;
3334
3335  /* Only work when we're in has_dependence_p mode.
3336     ??? This is a hack, this should actually be a hook.  */
3337  if (!has_dependence_data.dc || !has_dependence_data.pro)
3338    return;
3339
3340  gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3341  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3342
3343  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3344    has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3345}
3346
3347/* This structure holds the hooks for the dependency analysis used when
3348   actually processing dependencies in the scheduler.  */
3349static struct sched_deps_info_def has_dependence_sched_deps_info;
3350
3351/* This initializes most of the fields of the above structure.  */
3352static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3353  {
3354    NULL,
3355
3356    has_dependence_start_insn,
3357    has_dependence_finish_insn,
3358    has_dependence_start_lhs,
3359    has_dependence_finish_lhs,
3360    has_dependence_start_rhs,
3361    has_dependence_finish_rhs,
3362    has_dependence_note_reg_set,
3363    has_dependence_note_reg_clobber,
3364    has_dependence_note_reg_use,
3365    has_dependence_note_mem_dep,
3366    has_dependence_note_dep,
3367
3368    0, /* use_cselib */
3369    0, /* use_deps_list */
3370    0 /* generate_spec_deps */
3371  };
3372
3373/* Initialize has_dependence_sched_deps_info with extra spec field.  */
3374static void
3375setup_has_dependence_sched_deps_info (void)
3376{
3377  memcpy (&has_dependence_sched_deps_info,
3378	  &const_has_dependence_sched_deps_info,
3379	  sizeof (has_dependence_sched_deps_info));
3380
3381  if (spec_info != NULL)
3382    has_dependence_sched_deps_info.generate_spec_deps = 1;
3383
3384  sched_deps_info = &has_dependence_sched_deps_info;
3385}
3386
3387/* Remove all dependences found and recorded in has_dependence_data array.  */
3388void
3389sel_clear_has_dependence (void)
3390{
3391  int i;
3392
3393  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3394    has_dependence_data.has_dep_p[i] = 0;
3395}
3396
/* Return nonzero if EXPR is dependent upon PRED.  Return the pointer
3398   to the dependence information array in HAS_DEP_PP.  */
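/* HAS_DEP_PP gets one ds_t per DEPS_IN_* part (whole insn, lhs, rhs), and
   the value returned is the full merge of all parts, so it is zero iff no
   dependence was found.  */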
3399ds_t
3400has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3401{
3402  int i;
3403  ds_t ds;
3404  struct deps_desc *dc;
3405
3406  if (INSN_SIMPLEJUMP_P (pred))
3407    /* Unconditional jump is just a transfer of control flow.
3408       Ignore it.  */
3409    return false;
3410
3411  dc = &INSN_DEPS_CONTEXT (pred);
3412
3413  /* We init this field lazily.  */
3414  if (dc->reg_last == NULL)
3415    init_deps_reg_last (dc);
3416
3417  if (!dc->readonly)
3418    {
3419      has_dependence_data.pro = NULL;
3420      /* Initialize empty dep context with information about PRED.  */
3421      advance_deps_context (dc, pred);
3422      dc->readonly = 1;
3423    }
3424
3425  has_dependence_data.where = DEPS_IN_NOWHERE;
3426  has_dependence_data.pro = pred;
3427  has_dependence_data.con = EXPR_VINSN (expr);
3428  has_dependence_data.dc = dc;
3429
3430  sel_clear_has_dependence ();
3431
3432  /* Now catch all dependencies that would be generated between PRED and
3433     INSN.  */
3434  setup_has_dependence_sched_deps_info ();
3435  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3436  has_dependence_data.dc = NULL;
3437
3438  /* When a barrier was found, set DEPS_IN_INSN bits.  */
3439  if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3440    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3441  else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3442    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3443
3444  /* Do not allow stores to memory to move through checks.  Currently
3445     we don't move this to sched-deps.c as the check doesn't have
3446     obvious places to which this dependence can be attached.
     FIXME: this should go to a hook.  */
3448  if (EXPR_LHS (expr)
3449      && MEM_P (EXPR_LHS (expr))
3450      && sel_insn_is_speculation_check (pred))
3451    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3452
3453  *has_dep_pp = has_dependence_data.has_dep_p;
3454  ds = 0;
3455  for (i = 0; i < DEPS_IN_NOWHERE; i++)
3456    ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3457			NULL_RTX, NULL_RTX);
3458
3459  return ds;
3460}
3461
3462
3463/* Dependence hooks implementation that checks dependence latency constraints
3464   on the insns being scheduled.  The entry point for these routines is
3465   tick_check_p predicate.  */
3466
3467static struct
3468{
3469  /* An expr we are currently checking.  */
3470  expr_t expr;
3471
3472  /* A minimal cycle for its scheduling.  */
3473  int cycle;
3474
3475  /* Whether we have seen a true dependence while checking.  */
3476  bool seen_true_dep_p;
3477} tick_check_data;
3478
/* Update the minimal scheduling cycle of the expr being checked, given that
   it depends on PRO_INSN with status DS and weight DW.  */
3481static void
3482tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3483{
3484  expr_t con_expr = tick_check_data.expr;
3485  insn_t con_insn = EXPR_INSN_RTX (con_expr);
3486
3487  if (con_insn != pro_insn)
3488    {
3489      enum reg_note dt;
3490      int tick;
3491
3492      if (/* PROducer was removed from above due to pipelining.  */
3493	  !INSN_IN_STREAM_P (pro_insn)
3494	  /* Or PROducer was originally on the next iteration regarding the
3495	     CONsumer.  */
3496	  || (INSN_SCHED_TIMES (pro_insn)
3497	      - EXPR_SCHED_TIMES (con_expr)) > 1)
3498	/* Don't count this dependence.  */
3499        return;
3500
3501      dt = ds_to_dt (ds);
3502      if (dt == REG_DEP_TRUE)
3503        tick_check_data.seen_true_dep_p = true;
3504
3505      gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3506
3507      {
3508	dep_def _dep, *dep = &_dep;
3509
3510	init_dep (dep, pro_insn, con_insn, dt);
3511
3512	tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3513      }
3514
3515      /* When there are several kinds of dependencies between pro and con,
3516         only REG_DEP_TRUE should be taken into account.  */
3517      if (tick > tick_check_data.cycle
3518	  && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3519	tick_check_data.cycle = tick;
3520    }
3521}
3522
3523/* An implementation of note_dep hook.  */
3524static void
3525tick_check_note_dep (insn_t pro, ds_t ds)
3526{
3527  tick_check_dep_with_dw (pro, ds, 0);
3528}
3529
3530/* An implementation of note_mem_dep hook.  */
3531static void
3532tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3533{
3534  dw_t dw;
3535
3536  dw = (ds_to_dt (ds) == REG_DEP_TRUE
3537        ? estimate_dep_weak (mem1, mem2)
3538        : 0);
3539
3540  tick_check_dep_with_dw (pro, ds, dw);
3541}
3542
3543/* This structure contains hooks for dependence analysis used when determining
3544   whether an insn is ready for scheduling.  */
3545static struct sched_deps_info_def tick_check_sched_deps_info =
3546  {
3547    NULL,
3548
3549    NULL,
3550    NULL,
3551    NULL,
3552    NULL,
3553    NULL,
3554    NULL,
3555    haifa_note_reg_set,
3556    haifa_note_reg_clobber,
3557    haifa_note_reg_use,
3558    tick_check_note_mem_dep,
3559    tick_check_note_dep,
3560
3561    0, 0, 0
3562  };
3563
3564/* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3565   scheduled.  Return 0 if all data from producers in DC is ready.  */
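/* E.g., if the only producer of EXPR was scheduled on cycle 3 and the
   dependence costs 2 cycles, EXPR cannot issue before cycle 5; with
   FENCE_CYCLE equal to 4 this returns 1.  */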
3566int
3567tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3568{
  int cycles_left;

  /* Initialize variables.  */
3571  tick_check_data.expr = expr;
3572  tick_check_data.cycle = 0;
3573  tick_check_data.seen_true_dep_p = false;
3574  sched_deps_info = &tick_check_sched_deps_info;
3575
3576  gcc_assert (!dc->readonly);
3577  dc->readonly = 1;
3578  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3579  dc->readonly = 0;
3580
3581  cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3582
3583  return cycles_left >= 0 ? cycles_left : 0;
3584}
3585
3586
3587/* Functions to work with insns.  */
3588
3589/* Returns true if LHS of INSN is the same as DEST of an insn
3590   being moved.  */
3591bool
3592lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3593{
3594  rtx lhs = INSN_LHS (insn);
3595
3596  if (lhs == NULL || dest == NULL)
3597    return false;
3598
3599  return rtx_equal_p (lhs, dest);
3600}
3601
3602/* Return s_i_d entry of INSN.  Callable from debugger.  */
3603sel_insn_data_def
3604insn_sid (insn_t insn)
3605{
3606  return *SID (insn);
3607}
3608
3609/* True when INSN is a speculative check.  We can tell this by looking
3610   at the data structures of the selective scheduler, not by examining
3611   the pattern.  */
3612bool
3613sel_insn_is_speculation_check (rtx insn)
3614{
3615  return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
3616}
3617
3618/* Extracts machine mode MODE and destination location DST_LOC
3619   for given INSN.  */
3620void
3621get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode)
3622{
3623  rtx pat = PATTERN (insn);
3624
3625  gcc_assert (dst_loc);
3626  gcc_assert (GET_CODE (pat) == SET);
3627
3628  *dst_loc = SET_DEST (pat);
3629
3630  gcc_assert (*dst_loc);
3631  gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3632
3633  if (mode)
3634    *mode = GET_MODE (*dst_loc);
3635}
3636
3637/* Returns true when moving through JUMP will result in bookkeeping
3638   creation.  */
3639bool
3640bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3641{
3642  insn_t succ;
3643  succ_iterator si;
3644
3645  FOR_EACH_SUCC (succ, si, jump)
3646    if (sel_num_cfg_preds_gt_1 (succ))
3647      return true;
3648
3649  return false;
3650}
3651
3652/* Return 'true' if INSN is the only one in its basic block.  */
3653static bool
3654insn_is_the_only_one_in_bb_p (insn_t insn)
3655{
3656  return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3657}
3658
3659#ifdef ENABLE_CHECKING
3660/* Check that the region we're scheduling still has at most one
3661   backedge.  */
3662static void
3663verify_backedges (void)
3664{
3665  if (pipelining_p)
3666    {
3667      int i, n = 0;
3668      edge e;
3669      edge_iterator ei;
3670
3671      for (i = 0; i < current_nr_blocks; i++)
3672        FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
3673          if (in_current_region_p (e->dest)
3674              && BLOCK_TO_BB (e->dest->index) < i)
3675            n++;
3676
3677      gcc_assert (n <= 1);
3678    }
3679}
3680#endif
3681
3682
3683/* Functions to work with control flow.  */
3684
3685/* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that
3686   blocks are sorted in topological order (it might have been invalidated
3687   by redirecting an edge).  */
3688static void
3689sel_recompute_toporder (void)
3690{
3691  int i, n, rgn;
3692  int *postorder, n_blocks;
3693
3694  postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
3695  n_blocks = post_order_compute (postorder, false, false);
3696
3697  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3698  for (n = 0, i = n_blocks - 1; i >= 0; i--)
3699    if (CONTAINING_RGN (postorder[i]) == rgn)
3700      {
3701	BLOCK_TO_BB (postorder[i]) = n;
3702	BB_TO_BLOCK (n) = postorder[i];
3703	n++;
3704      }
3705
3706  /* Assert that we updated info for all blocks.  We may miss some blocks if
3707     this function is called while redirecting an edge that made a block
3708     unreachable, but that block has not been deleted yet.  */
3709  gcc_assert (n == RGN_NR_BLOCKS (rgn));
3710}
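
/* A made-up example of the mapping recomputed above: if the region consists
   of blocks with indices 5, 7 and 4, in that topological order, then

       BB_TO_BLOCK (0) == 5     BLOCK_TO_BB (5) == 0
       BB_TO_BLOCK (1) == 7     BLOCK_TO_BB (7) == 1
       BB_TO_BLOCK (2) == 4     BLOCK_TO_BB (4) == 2

   i.e. BB_TO_BLOCK maps a region-local position to a basic block index and
   BLOCK_TO_BB is its inverse.  */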
3711
3712/* Tidy the possibly empty block BB.  */
3713static bool
3714maybe_tidy_empty_bb (basic_block bb)
3715{
3716  basic_block succ_bb, pred_bb, note_bb;
3717  vec<basic_block> dom_bbs;
3718  edge e;
3719  edge_iterator ei;
3720  bool rescan_p;
3721
3722  /* Keep an empty bb only if it immediately precedes EXIT and has an
3723     incoming non-fallthrough edge, or if it has no predecessors or
3724     successors.  Otherwise remove it.  */
3725  if (!sel_bb_empty_p (bb)
3726      || (single_succ_p (bb)
3727	  && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
3728          && (!single_pred_p (bb)
3729              || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3730      || EDGE_COUNT (bb->preds) == 0
3731      || EDGE_COUNT (bb->succs) == 0)
3732    return false;
3733
3734  /* Do not attempt to redirect complex edges.  */
3735  FOR_EACH_EDGE (e, ei, bb->preds)
3736    if (e->flags & EDGE_COMPLEX)
3737      return false;
3738    else if (e->flags & EDGE_FALLTHRU)
3739      {
3740	rtx note;
3741	/* If prev bb ends with asm goto, see if any of the
3742	   ASM_OPERANDS_LABELs don't point to the fallthru
3743	   label.  Do not attempt to redirect it in that case.  */
3744	if (JUMP_P (BB_END (e->src))
3745	    && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
3746	  {
3747	    int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
3748
3749	    for (i = 0; i < n; ++i)
3750	      if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
3751		return false;
3752	  }
3753      }
3754
3755  free_data_sets (bb);
3756
3757  /* Do not delete BB if it has more than one successor.
3758     That can occur when we are moving a jump.  */
3759  if (!single_succ_p (bb))
3760    {
3761      gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3762      sel_merge_blocks (bb->prev_bb, bb);
3763      return true;
3764    }
3765
3766  succ_bb = single_succ (bb);
3767  rescan_p = true;
3768  pred_bb = NULL;
3769  dom_bbs.create (0);
3770
3771  /* Save a pred/succ from the current region to attach the notes to.  */
3772  note_bb = NULL;
3773  FOR_EACH_EDGE (e, ei, bb->preds)
3774    if (in_current_region_p (e->src))
3775      {
3776	note_bb = e->src;
3777	break;
3778      }
3779  if (note_bb == NULL)
3780    note_bb = succ_bb;
3781
3782  /* Redirect all non-fallthru edges to the next bb.  */
3783  while (rescan_p)
3784    {
3785      rescan_p = false;
3786
3787      FOR_EACH_EDGE (e, ei, bb->preds)
3788        {
3789          pred_bb = e->src;
3790
3791          if (!(e->flags & EDGE_FALLTHRU))
3792            {
3793	      /* We cannot invalidate the computed topological order by moving
3794	         the edge destination block (E->SUCC) along a fallthru edge.
3795
3796		 We will update dominators here only if redirecting leaves
3797		 an unreachable block; otherwise
3798		 sel_redirect_edge_and_branch will take care of it.  */
3799	      if (e->dest != bb
3800		  && single_pred_p (e->dest))
3801		dom_bbs.safe_push (e->dest);
3802              sel_redirect_edge_and_branch (e, succ_bb);
3803              rescan_p = true;
3804              break;
3805            }
3806	  /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3807	     to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3808	     still have to adjust it.  */
3809	  else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3810	    {
3811	      /* If possible, try to remove the unneeded conditional jump.  */
3812	      if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3813		  && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3814		{
3815		  if (!sel_remove_insn (BB_END (pred_bb), false, false))
3816		    tidy_fallthru_edge (e);
3817		}
3818	      else
3819		sel_redirect_edge_and_branch (e, succ_bb);
3820	      rescan_p = true;
3821	      break;
3822	    }
3823        }
3824    }
3825
3826  if (can_merge_blocks_p (bb->prev_bb, bb))
3827    sel_merge_blocks (bb->prev_bb, bb);
3828  else
3829    {
3830      /* This is a block without a fallthru predecessor.  Just delete it.  */
3831      gcc_assert (note_bb);
3832      move_bb_info (note_bb, bb);
3833      remove_empty_bb (bb, true);
3834    }
3835
3836  if (!dom_bbs.is_empty ())
3837    {
3838      dom_bbs.safe_push (succ_bb);
3839      iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3840      dom_bbs.release ();
3841    }
3842
3843  return true;
3844}
3845
3846/* Tidy the control flow after we have removed the original insn from
3847   XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
3848   is true, also try to optimize control flow on non-empty blocks.  */
3849bool
3850tidy_control_flow (basic_block xbb, bool full_tidying)
3851{
3852  bool changed = true;
3853  insn_t first, last;
3854
3855  /* First check whether XBB is empty.  */
3856  changed = maybe_tidy_empty_bb (xbb);
3857  if (changed || !full_tidying)
3858    return changed;
3859
3860  /* Check if an unnecessary jump remains after the insn was removed.  */
3861  if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3862      && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3863      && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3864    {
3865      if (sel_remove_insn (BB_END (xbb), false, false))
3866        return true;
3867      tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3868    }
3869
3870  first = sel_bb_head (xbb);
3871  last = sel_bb_end (xbb);
3872  if (MAY_HAVE_DEBUG_INSNS)
3873    {
3874      if (first != last && DEBUG_INSN_P (first))
3875	do
3876	  first = NEXT_INSN (first);
3877	while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3878
3879      if (first != last && DEBUG_INSN_P (last))
3880	do
3881	  last = PREV_INSN (last);
3882	while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3883    }
3884  /* Check if the previous basic block ends with an unnecessary jump to the
3885     next basic block, left over after removing INSN from the stream.
3886     If so, remove that jump and redirect the edge to the current
3887     basic block (where INSN was before deletion).  This way,
3888     when the NOP is deleted several instructions later together with its
3889     basic block, we will not get a jump to the next instruction, which
3890     can be harmful.  */
3891  if (first == last
3892      && !sel_bb_empty_p (xbb)
3893      && INSN_NOP_P (last)
3894      /* Flow goes fallthru from current block to the next.  */
3895      && EDGE_COUNT (xbb->succs) == 1
3896      && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3897      /* When successor is an EXIT block, it may not be the next block.  */
3898      && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
3899      /* And unconditional jump in previous basic block leads to
3900         next basic block of XBB and this jump can be safely removed.  */
3901      && in_current_region_p (xbb->prev_bb)
3902      && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3903      && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3904      /* Also this jump is not at the scheduling boundary.  */
3905      && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3906    {
3907      bool recompute_toporder_p;
3908      /* Clear the data structures of the jump -- the jump itself will be
3909         removed by sel_redirect_edge_and_branch.  */
3910      clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3911      recompute_toporder_p
3912        = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3913
3914      gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3915
3916      /* It can turn out that, after removing the unused jump, the basic
3917         block that contained that jump becomes empty too.  In such a case
3918         remove it as well.  */
3919      if (sel_bb_empty_p (xbb->prev_bb))
3920        changed = maybe_tidy_empty_bb (xbb->prev_bb);
3921      if (recompute_toporder_p)
3922	sel_recompute_toporder ();
3923    }
3924
3925#ifdef ENABLE_CHECKING
3926  verify_backedges ();
3927  verify_dominators (CDI_DOMINATORS);
3928#endif
3929
3930  return changed;
3931}
3932
3933/* Purge meaningless empty blocks in the middle of a region.  */
3934void
3935purge_empty_blocks (void)
3936{
3937  int i;
3938
3939  /* Do not attempt to delete the first basic block in the region.  */
3940  for (i = 1; i < current_nr_blocks; )
3941    {
3942      basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
3943
3944      if (maybe_tidy_empty_bb (b))
3945	continue;
3946
3947      i++;
3948    }
3949}
3950
3951/* Rip-off INSN from the insn stream.  When ONLY_DISCONNECT is true,
3952   do not delete insn's data, because it will be later re-emitted.
3953   Return true if we have removed some blocks afterwards.  */
3954bool
3955sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3956{
3957  basic_block bb = BLOCK_FOR_INSN (insn);
3958
3959  gcc_assert (INSN_IN_STREAM_P (insn));
3960
3961  if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3962    {
3963      expr_t expr;
3964      av_set_iterator i;
3965
3966      /* When we remove a debug insn that is head of a BB, it remains
3967	 in the AV_SET of the block, but it shouldn't.  */
3968      FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3969	if (EXPR_INSN_RTX (expr) == insn)
3970	  {
3971	    av_set_iter_remove (&i);
3972	    break;
3973	  }
3974    }
3975
3976  if (only_disconnect)
3977    remove_insn (insn);
3978  else
3979    {
3980      delete_insn (insn);
3981      clear_expr (INSN_EXPR (insn));
3982    }
3983
3984  /* It is necessary to NULL these fields in case we are going to re-insert
3985     INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT
3986     case, but also for NOPs that we will return to the nop pool.  */
3987  SET_PREV_INSN (insn) = NULL_RTX;
3988  SET_NEXT_INSN (insn) = NULL_RTX;
3989  set_block_for_insn (insn, NULL);
3990
3991  return tidy_control_flow (bb, full_tidying);
3992}
3993
3994/* Estimate the number of insns in BB.  */
3995static int
3996sel_estimate_number_of_insns (basic_block bb)
3997{
3998  int res = 0;
3999  insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
4000
4001  for (; insn != next_tail; insn = NEXT_INSN (insn))
4002    if (NONDEBUG_INSN_P (insn))
4003      res++;
4004
4005  return res;
4006}
4007
4008/* We don't need separate luids for notes or labels.  */
4009static int
4010sel_luid_for_non_insn (rtx x)
4011{
4012  gcc_assert (NOTE_P (x) || LABEL_P (x));
4013
4014  return -1;
4015}
4016
4017/*  Find the proper seqno for inserting at INSN by successors.
4018    Return -1 if no successors with positive seqno exist.  */
4019static int
4020get_seqno_by_succs (rtx_insn *insn)
4021{
4022  basic_block bb = BLOCK_FOR_INSN (insn);
4023  rtx_insn *tmp = insn, *end = BB_END (bb);
4024  int seqno;
4025  insn_t succ = NULL;
4026  succ_iterator si;
4027
4028  while (tmp != end)
4029    {
4030      tmp = NEXT_INSN (tmp);
4031      if (INSN_P (tmp))
4032        return INSN_SEQNO (tmp);
4033    }
4034
4035  seqno = INT_MAX;
4036
4037  FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
4038    if (INSN_SEQNO (succ) > 0)
4039      seqno = MIN (seqno, INSN_SEQNO (succ));
4040
4041  if (seqno == INT_MAX)
4042    return -1;
4043
4044  return seqno;
4045}
4046
4047/* Compute seqno for INSN by its preds or succs.  Use OLD_SEQNO to compute
4048   seqno in corner cases.  */
4049static int
4050get_seqno_for_a_jump (insn_t insn, int old_seqno)
4051{
4052  int seqno;
4053
4054  gcc_assert (INSN_SIMPLEJUMP_P (insn));
4055
4056  if (!sel_bb_head_p (insn))
4057    seqno = INSN_SEQNO (PREV_INSN (insn));
4058  else
4059    {
4060      basic_block bb = BLOCK_FOR_INSN (insn);
4061
4062      if (single_pred_p (bb)
4063	  && !in_current_region_p (single_pred (bb)))
4064	{
4065          /* We can have preds outside a region when splitting edges
4066             for pipelining of an outer loop.  Use succ instead.
4067             There should be only one of them.  */
4068	  insn_t succ = NULL;
4069          succ_iterator si;
4070          bool first = true;
4071
4072	  gcc_assert (flag_sel_sched_pipelining_outer_loops
4073		      && current_loop_nest);
4074          FOR_EACH_SUCC_1 (succ, si, insn,
4075                           SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4076            {
4077              gcc_assert (first);
4078              first = false;
4079            }
4080
4081	  gcc_assert (succ != NULL);
4082	  seqno = INSN_SEQNO (succ);
4083	}
4084      else
4085	{
4086	  insn_t *preds;
4087	  int n;
4088
4089	  cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4090
4091	  gcc_assert (n > 0);
4092	  /* For one predecessor, use simple method.  */
4093	  if (n == 1)
4094	    seqno = INSN_SEQNO (preds[0]);
4095	  else
4096	    seqno = get_seqno_by_preds (insn);
4097
4098	  free (preds);
4099	}
4100    }
4101
4102  /* We were unable to find a good seqno among preds.  */
4103  if (seqno < 0)
4104    seqno = get_seqno_by_succs (insn);
4105
4106  if (seqno < 0)
4107    {
4108      /* The only case where this could be here legally is that the only
4109	 unscheduled insn was a conditional jump that got removed and turned
4110	 into this unconditional one.  Initialize from the old seqno
4111	 of that jump passed down to here.  */
4112      seqno = old_seqno;
4113    }
4114
4115  gcc_assert (seqno >= 0);
4116  return seqno;
4117}
4118
4119/*  Find the proper seqno for inserting at INSN.  Returns -1 if no predecessors
4120    with positive seqno exist.  */
4121int
4122get_seqno_by_preds (rtx_insn *insn)
4123{
4124  basic_block bb = BLOCK_FOR_INSN (insn);
4125  rtx_insn *tmp = insn, *head = BB_HEAD (bb);
4126  insn_t *preds;
4127  int n, i, seqno;
4128
4129  /* Loop backwards from INSN to HEAD including both.  */
4130  while (1)
4131    {
4132      if (INSN_P (tmp))
4133	return INSN_SEQNO (tmp);
4134      if (tmp == head)
4135	break;
4136      tmp = PREV_INSN (tmp);
4137    }
4138
4139  cfg_preds (bb, &preds, &n);
4140  for (i = 0, seqno = -1; i < n; i++)
4141    seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4142
4143  return seqno;
4144}
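
/* A small example with made-up seqnos: suppose a block's two in-region
   predecessors end with insns whose seqnos are 4 and 7, and its successors
   start with insns whose seqnos are 9 and 12.  For an insn placed at the
   head of that block, get_seqno_by_preds yields 7 (the maximum over preds),
   while get_seqno_by_succs yields 9 (the minimum positive seqno over
   successors).  get_seqno_for_a_jump tries preds first, then successors,
   then falls back to OLD_SEQNO.  */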
4145
4146
4147
4148/* Extend pass-scope data structures for basic blocks.  */
4149void
4150sel_extend_global_bb_info (void)
4151{
4152  sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4153}
4154
4155/* Extend region-scope data structures for basic blocks.  */
4156static void
4157extend_region_bb_info (void)
4158{
4159  sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4160}
4161
4162/* Extend all data structures to fit for all basic blocks.  */
4163static void
4164extend_bb_info (void)
4165{
4166  sel_extend_global_bb_info ();
4167  extend_region_bb_info ();
4168}
4169
4170/* Finalize pass-scope data structures for basic blocks.  */
4171void
4172sel_finish_global_bb_info (void)
4173{
4174  sel_global_bb_info.release ();
4175}
4176
4177/* Finalize region-scope data structures for basic blocks.  */
4178static void
4179finish_region_bb_info (void)
4180{
4181  sel_region_bb_info.release ();
4182}
4183
4184
4185/* Data for each insn in current region.  */
4186vec<sel_insn_data_def> s_i_d = vNULL;
4187
4188/* Extend data structures for insns from current region.  */
4189static void
4190extend_insn_data (void)
4191{
4192  int reserve;
4193
4194  sched_extend_target ();
4195  sched_deps_init (false);
4196
4197  /* Extend data structures for insns from current region.  */
4198  reserve = (sched_max_luid + 1 - s_i_d.length ());
4199  if (reserve > 0 && ! s_i_d.space (reserve))
4200    {
4201      int size;
4202
4203      if (sched_max_luid / 2 > 1024)
4204        size = sched_max_luid + 1024;
4205      else
4206        size = 3 * sched_max_luid / 2;
4207
4208
4209      s_i_d.safe_grow_cleared (size);
4210    }
4211}
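
/* The growth policy above in numbers (example values only): with
   sched_max_luid == 1000 the 3/2 rule applies and the vector grows to 1500
   entries; with sched_max_luid == 10000 the additive rule applies and it
   grows to 11024 entries, avoiding repeated large reallocations.  */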
4212
4213/* Finalize data structures for insns from current region.  */
4214static void
4215finish_insns (void)
4216{
4217  unsigned i;
4218
4219  /* Clear here all dependence contexts that may have been left over from
4220     insns that were removed during scheduling.  */
4221  for (i = 0; i < s_i_d.length (); i++)
4222    {
4223      sel_insn_data_def *sid_entry = &s_i_d[i];
4224
4225      if (sid_entry->live)
4226        return_regset_to_pool (sid_entry->live);
4227      if (sid_entry->analyzed_deps)
4228	{
4229	  BITMAP_FREE (sid_entry->analyzed_deps);
4230	  BITMAP_FREE (sid_entry->found_deps);
4231          htab_delete (sid_entry->transformed_insns);
4232	  free_deps (&sid_entry->deps_context);
4233	}
4234      if (EXPR_VINSN (&sid_entry->expr))
4235        {
4236          clear_expr (&sid_entry->expr);
4237
4238          /* Also, clear CANT_MOVE bit here, because we really don't want it
4239             to be passed to the next region.  */
4240          CANT_MOVE_BY_LUID (i) = 0;
4241        }
4242    }
4243
4244  s_i_d.release ();
4245}
4246
4247/* A proxy to pass initialization data to init_insn_data ().  */
4248static sel_insn_data_def _insn_init_ssid;
4249static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4250
4251/* If true create a new vinsn.  Otherwise use the one from EXPR.  */
4252static bool insn_init_create_new_vinsn_p;
4253
4254/* Set all necessary data for initialization of the new insn[s].  */
4255static expr_t
4256set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4257{
4258  expr_t x = &insn_init_ssid->expr;
4259
4260  copy_expr_onside (x, expr);
4261  if (vi != NULL)
4262    {
4263      insn_init_create_new_vinsn_p = false;
4264      change_vinsn_in_expr (x, vi);
4265    }
4266  else
4267    insn_init_create_new_vinsn_p = true;
4268
4269  insn_init_ssid->seqno = seqno;
4270  return x;
4271}
4272
4273/* Init data for INSN.  */
4274static void
4275init_insn_data (insn_t insn)
4276{
4277  expr_t expr;
4278  sel_insn_data_t ssid = insn_init_ssid;
4279
4280  /* The fields mentioned below are special and hence are not being
4281     propagated to the new insns.  */
4282  gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4283	      && !ssid->after_stall_p && ssid->sched_cycle == 0);
4284  gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4285
4286  expr = INSN_EXPR (insn);
4287  copy_expr (expr, &ssid->expr);
4288  prepare_insn_expr (insn, ssid->seqno);
4289
4290  if (insn_init_create_new_vinsn_p)
4291    change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4292
4293  if (first_time_insn_init (insn))
4294    init_first_time_insn_data (insn);
4295}
4296
4297/* This is used to initialize spurious jumps generated by
4298   the sel_redirect_edge_and_branch* functions.  OLD_SEQNO is used for
4299   initializing seqnos in corner cases within get_seqno_for_a_jump.  */
4300static void
4301init_simplejump_data (insn_t insn, int old_seqno)
4302{
4303  init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4304	     REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
4305	     vNULL, true, false, false,
4306	     false, true);
4307  INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno);
4308  init_first_time_insn_data (insn);
4309}
4310
4311/* Perform deferred initialization of insns.  This is used to process
4312   a new jump that may be created by redirect_edge.  OLD_SEQNO is used
4313   for initializing simplejumps in init_simplejump_data.  */
4314static void
4315sel_init_new_insn (insn_t insn, int flags, int old_seqno)
4316{
4317  /* We create data structures for bb when the first insn is emitted in it.  */
4318  if (INSN_P (insn)
4319      && INSN_IN_STREAM_P (insn)
4320      && insn_is_the_only_one_in_bb_p (insn))
4321    {
4322      extend_bb_info ();
4323      create_initial_data_sets (BLOCK_FOR_INSN (insn));
4324    }
4325
4326  if (flags & INSN_INIT_TODO_LUID)
4327    {
4328      sched_extend_luids ();
4329      sched_init_insn_luid (insn);
4330    }
4331
4332  if (flags & INSN_INIT_TODO_SSID)
4333    {
4334      extend_insn_data ();
4335      init_insn_data (insn);
4336      clear_expr (&insn_init_ssid->expr);
4337    }
4338
4339  if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4340    {
4341      extend_insn_data ();
4342      init_simplejump_data (insn, old_seqno);
4343    }
4344
4345  gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4346              == CONTAINING_RGN (BB_TO_BLOCK (0)));
4347}
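
/* A sketch of the deferred-initialization protocol implemented by
   set_insn_init, init_insn_data and sel_init_new_insn; the emit step in the
   middle is schematic, only the surrounding calls are real:

       set_insn_init (expr, vi, seqno);
       insn = <emit the new insn into the stream>;
       sel_init_new_insn (insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

   init_insn_data then copies the stashed expr into INSN_EXPR (insn).  Jumps
   created by edge redirection instead pass INSN_INIT_TODO_SIMPLEJUMP
   together with OLD_SEQNO.  */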
4348
4349
4350/* Functions to init/finish work with lv sets.  */
4351
4352/* Init BB_LV_SET of BB from DF_LR_IN set of BB.  */
4353static void
4354init_lv_set (basic_block bb)
4355{
4356  gcc_assert (!BB_LV_SET_VALID_P (bb));
4357
4358  BB_LV_SET (bb) = get_regset_from_pool ();
4359  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4360  BB_LV_SET_VALID_P (bb) = true;
4361}
4362
4363/* Copy liveness information to BB from FROM_BB.  */
4364static void
4365copy_lv_set_from (basic_block bb, basic_block from_bb)
4366{
4367  gcc_assert (!BB_LV_SET_VALID_P (bb));
4368
4369  COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4370  BB_LV_SET_VALID_P (bb) = true;
4371}
4372
4373/* Initialize lv set of all bb headers.  */
4374void
4375init_lv_sets (void)
4376{
4377  basic_block bb;
4378
4379  /* Initialize LV sets.  */
4380  FOR_EACH_BB_FN (bb, cfun)
4381    init_lv_set (bb);
4382
4383  /* Don't forget EXIT_BLOCK.  */
4384  init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4385}
4386
4387/* Release lv set of BB.  */
4388static void
4389free_lv_set (basic_block bb)
4390{
4391  gcc_assert (BB_LV_SET (bb) != NULL);
4392
4393  return_regset_to_pool (BB_LV_SET (bb));
4394  BB_LV_SET (bb) = NULL;
4395  BB_LV_SET_VALID_P (bb) = false;
4396}
4397
4398/* Finalize lv sets of all bb headers.  */
4399void
4400free_lv_sets (void)
4401{
4402  basic_block bb;
4403
4404  /* Don't forget EXIT_BLOCK.  */
4405  free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4406
4407  /* Free LV sets.  */
4408  FOR_EACH_BB_FN (bb, cfun)
4409    if (BB_LV_SET (bb))
4410      free_lv_set (bb);
4411}
4412
4413/* Mark AV_SET for BB as invalid, so this set will be updated the next time
4414   compute_av() processes BB.  This function is called when creating new basic
4415   blocks, as well as for blocks (either new or existing) where new jumps are
4416   created when the control flow is being updated.  */
4417static void
4418invalidate_av_set (basic_block bb)
4419{
4420  BB_AV_LEVEL (bb) = -1;
4421}
4422
4423/* Create initial data sets for BB (they will be invalid).  */
4424static void
4425create_initial_data_sets (basic_block bb)
4426{
4427  if (BB_LV_SET (bb))
4428    BB_LV_SET_VALID_P (bb) = false;
4429  else
4430    BB_LV_SET (bb) = get_regset_from_pool ();
4431  invalidate_av_set (bb);
4432}
4433
4434/* Free av set of BB.  */
4435static void
4436free_av_set (basic_block bb)
4437{
4438  av_set_clear (&BB_AV_SET (bb));
4439  BB_AV_LEVEL (bb) = 0;
4440}
4441
4442/* Free data sets of BB.  */
4443void
4444free_data_sets (basic_block bb)
4445{
4446  free_lv_set (bb);
4447  free_av_set (bb);
4448}
4449
4450/* Exchange lv sets of TO and FROM.  */
4451static void
4452exchange_lv_sets (basic_block to, basic_block from)
4453{
4454  {
4455    regset to_lv_set = BB_LV_SET (to);
4456
4457    BB_LV_SET (to) = BB_LV_SET (from);
4458    BB_LV_SET (from) = to_lv_set;
4459  }
4460
4461  {
4462    bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4463
4464    BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4465    BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4466  }
4467}
4468
4469
4470/* Exchange av sets of TO and FROM.  */
4471static void
4472exchange_av_sets (basic_block to, basic_block from)
4473{
4474  {
4475    av_set_t to_av_set = BB_AV_SET (to);
4476
4477    BB_AV_SET (to) = BB_AV_SET (from);
4478    BB_AV_SET (from) = to_av_set;
4479  }
4480
4481  {
4482    int to_av_level = BB_AV_LEVEL (to);
4483
4484    BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4485    BB_AV_LEVEL (from) = to_av_level;
4486  }
4487}
4488
4489/* Exchange data sets of TO and FROM.  */
4490void
4491exchange_data_sets (basic_block to, basic_block from)
4492{
4493  exchange_lv_sets (to, from);
4494  exchange_av_sets (to, from);
4495}
4496
4497/* Copy data sets of FROM to TO.  */
4498void
4499copy_data_sets (basic_block to, basic_block from)
4500{
4501  gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4502  gcc_assert (BB_AV_SET (to) == NULL);
4503
4504  BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4505  BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4506
4507  if (BB_AV_SET_VALID_P (from))
4508    {
4509      BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4510    }
4511  if (BB_LV_SET_VALID_P (from))
4512    {
4513      gcc_assert (BB_LV_SET (to) != NULL);
4514      COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4515    }
4516}
4517
4518/* Return an av set for INSN, if any.  */
4519av_set_t
4520get_av_set (insn_t insn)
4521{
4522  av_set_t av_set;
4523
4524  gcc_assert (AV_SET_VALID_P (insn));
4525
4526  if (sel_bb_head_p (insn))
4527    av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4528  else
4529    av_set = NULL;
4530
4531  return av_set;
4532}
4533
4534/* Implementation of AV_LEVEL () macro.  Return AV_LEVEL () of INSN.  */
4535int
4536get_av_level (insn_t insn)
4537{
4538  int av_level;
4539
4540  gcc_assert (INSN_P (insn));
4541
4542  if (sel_bb_head_p (insn))
4543    av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4544  else
4545    av_level = INSN_WS_LEVEL (insn);
4546
4547  return av_level;
4548}
4549
4550
4551
4552/* Variables to work with control-flow graph.  */
4553
4554/* The basic blocks that have already been processed by sched_data_update (),
4555   but haven't been passed to sel_add_bb () yet.  */
4556static vec<basic_block>
4557    last_added_blocks = vNULL;
4558
4559/* A pool for allocating successor infos.  */
4560static struct
4561{
4562  /* A stack for saving succs_info structures.  */
4563  struct succs_info *stack;
4564
4565  /* Its size.  */
4566  int size;
4567
4568  /* Top of the stack.  */
4569  int top;
4570
4571  /* Maximal value of the top.  */
4572  int max_top;
4573}  succs_info_pool;
4574
4575/* Functions to work with control-flow graph.  */
4576
4577/* Return the head insn of BB, or NULL if BB is empty.  */
4578rtx_insn *
4579sel_bb_head (basic_block bb)
4580{
4581  rtx_insn *head;
4582
4583  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4584    {
4585      gcc_assert (exit_insn != NULL_RTX);
4586      head = exit_insn;
4587    }
4588  else
4589    {
4590      insn_t note;
4591
4592      note = bb_note (bb);
4593      head = next_nonnote_insn (note);
4594
4595      if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4596	head = NULL;
4597    }
4598
4599  return head;
4600}
4601
4602/* Return true if INSN is a basic block header.  */
4603bool
4604sel_bb_head_p (insn_t insn)
4605{
4606  return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4607}
4608
4609/* Return last insn of BB.  */
4610rtx_insn *
4611sel_bb_end (basic_block bb)
4612{
4613  if (sel_bb_empty_p (bb))
4614    return NULL;
4615
4616  gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
4617
4618  return BB_END (bb);
4619}
4620
4621/* Return true if INSN is the last insn in its basic block.  */
4622bool
4623sel_bb_end_p (insn_t insn)
4624{
4625  return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4626}
4627
4628/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
4629bool
4630sel_bb_empty_p (basic_block bb)
4631{
4632  return sel_bb_head (bb) == NULL;
4633}
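
/* Note on the helpers above: for any block BB of the region,
   sel_bb_empty_p (bb) is equivalent to sel_bb_head (bb) == NULL, and for a
   non-empty BB both sel_bb_head_p (sel_bb_head (bb)) and
   sel_bb_end_p (sel_bb_end (bb)) hold by construction.  */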
4634
4635/* True when BB belongs to the current scheduling region.  */
4636bool
4637in_current_region_p (basic_block bb)
4638{
4639  if (bb->index < NUM_FIXED_BLOCKS)
4640    return false;
4641
4642  return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4643}
4644
4645/* Return the block which is a fallthru bb of a conditional jump JUMP.  */
4646basic_block
4647fallthru_bb_of_jump (const rtx_insn *jump)
4648{
4649  if (!JUMP_P (jump))
4650    return NULL;
4651
4652  if (!any_condjump_p (jump))
4653    return NULL;
4654
4655  /* A basic block that ends with a conditional jump may still have only one
4656     successor (and be followed by a barrier); we are not interested in that case.  */
4657  if (single_succ_p (BLOCK_FOR_INSN (jump)))
4658    return NULL;
4659
4660  return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4661}
4662
4663/* Remove all notes from BB and save them in BB_NOTE_LIST.  */
4664static void
4665init_bb (basic_block bb)
4666{
4667  remove_notes (bb_note (bb), BB_END (bb));
4668  BB_NOTE_LIST (bb) = note_list;
4669}
4670
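/* Init per-bb data structures (bb info and saved note lists) for each basic
   block in BBS.  */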
4671void
4672sel_init_bbs (bb_vec_t bbs)
4673{
4674  const struct sched_scan_info_def ssi =
4675    {
4676      extend_bb_info, /* extend_bb */
4677      init_bb, /* init_bb */
4678      NULL, /* extend_insn */
4679      NULL /* init_insn */
4680    };
4681
4682  sched_scan (&ssi, bbs);
4683}
4684
4685/* Restore notes for the whole region.  */
4686static void
4687sel_restore_notes (void)
4688{
4689  int bb;
4690  insn_t insn;
4691
4692  for (bb = 0; bb < current_nr_blocks; bb++)
4693    {
4694      basic_block first, last;
4695
4696      first = EBB_FIRST_BB (bb);
4697      last = EBB_LAST_BB (bb)->next_bb;
4698
4699      do
4700	{
4701	  note_list = BB_NOTE_LIST (first);
4702	  restore_other_notes (NULL, first);
4703	  BB_NOTE_LIST (first) = NULL;
4704
4705	  FOR_BB_INSNS (first, insn)
4706	    if (NONDEBUG_INSN_P (insn))
4707	      reemit_notes (insn);
4708
4709          first = first->next_bb;
4710	}
4711      while (first != last);
4712    }
4713}
4714
4715/* Free per-bb data structures.  */
4716void
4717sel_finish_bbs (void)
4718{
4719  sel_restore_notes ();
4720
4721  /* Remove current loop preheader from this loop.  */
4722  if (current_loop_nest)
4723    sel_remove_loop_preheader ();
4724
4725  finish_region_bb_info ();
4726}
4727
4728/* Return true if INSN has a single successor of type FLAGS.  */
4729bool
4730sel_insn_has_single_succ_p (insn_t insn, int flags)
4731{
4732  insn_t succ;
4733  succ_iterator si;
4734  bool first_p = true;
4735
4736  FOR_EACH_SUCC_1 (succ, si, insn, flags)
4737    {
4738      if (first_p)
4739	first_p = false;
4740      else
4741	return false;
4742    }
4743
4744  return true;
4745}
4746
4747/* Allocate successor's info.  */
4748static struct succs_info *
4749alloc_succs_info (void)
4750{
4751  if (succs_info_pool.top == succs_info_pool.max_top)
4752    {
4753      int i;
4754
4755      if (++succs_info_pool.max_top >= succs_info_pool.size)
4756        gcc_unreachable ();
4757
4758      i = ++succs_info_pool.top;
4759      succs_info_pool.stack[i].succs_ok.create (10);
4760      succs_info_pool.stack[i].succs_other.create (10);
4761      succs_info_pool.stack[i].probs_ok.create (10);
4762    }
4763  else
4764    succs_info_pool.top++;
4765
4766  return &succs_info_pool.stack[succs_info_pool.top];
4767}
4768
4769/* Free successor's info.  */
4770void
4771free_succs_info (struct succs_info * sinfo)
4772{
4773  gcc_assert (succs_info_pool.top >= 0
4774              && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4775  succs_info_pool.top--;
4776
4777  /* Clear stale info.  */
4778  sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
4779  sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
4780  sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
4781  sinfo->all_prob = 0;
4782  sinfo->succs_ok_n = 0;
4783  sinfo->all_succs_n = 0;
4784}
4785
4786/* Compute successor info for INSN.  FLAGS are the flags passed
4787   to the FOR_EACH_SUCC_1 iterator.  */
4788struct succs_info *
4789compute_succs_info (insn_t insn, short flags)
4790{
4791  succ_iterator si;
4792  insn_t succ;
4793  struct succs_info *sinfo = alloc_succs_info ();
4794
4795  /* Traverse *all* successors and decide what to do with each.  */
4796  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4797    {
4798      /* FIXME: this doesn't work for skipping to loop exits, as we don't
4799         perform code motion through inner loops.  */
4800      short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4801
4802      if (current_flags & flags)
4803        {
4804          sinfo->succs_ok.safe_push (succ);
4805          sinfo->probs_ok.safe_push (
4806		    /* FIXME: Improve calculation when skipping
4807                       inner loop to exits.  */
4808                    si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
4809          sinfo->succs_ok_n++;
4810        }
4811      else
4812        sinfo->succs_other.safe_push (succ);
4813
4814      /* Compute all_prob.  */
4815      if (!si.bb_end)
4816        sinfo->all_prob = REG_BR_PROB_BASE;
4817      else
4818        sinfo->all_prob += si.e1->probability;
4819
4820      sinfo->all_succs_n++;
4821    }
4822
4823  return sinfo;
4824}
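
/* Usage sketch: successor infos live in a small stack-like pool, so a
   compute_succs_info call must be paired with free_succs_info in LIFO
   order, e.g.

       struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
       ... walk sinfo->succs_ok together with sinfo->probs_ok ...
       free_succs_info (sinfo);

   free_succs_info asserts that SINFO is the most recently allocated entry.  */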
4825
4826/* Return the predecessors of BB in PREDS and their number in N.
4827   Empty blocks are skipped.  SIZE is used to allocate PREDS.  */
4828static void
4829cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4830{
4831  edge e;
4832  edge_iterator ei;
4833
4834  gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4835
4836  FOR_EACH_EDGE (e, ei, bb->preds)
4837    {
4838      basic_block pred_bb = e->src;
4839      insn_t bb_end = BB_END (pred_bb);
4840
4841      if (!in_current_region_p (pred_bb))
4842	{
4843	  gcc_assert (flag_sel_sched_pipelining_outer_loops
4844		      && current_loop_nest);
4845	  continue;
4846	}
4847
4848      if (sel_bb_empty_p (pred_bb))
4849	cfg_preds_1 (pred_bb, preds, n, size);
4850      else
4851	{
4852	  if (*n == *size)
4853	    *preds = XRESIZEVEC (insn_t, *preds,
4854                                 (*size = 2 * *size + 1));
4855	  (*preds)[(*n)++] = bb_end;
4856	}
4857    }
4858
4859  gcc_assert (*n != 0
4860	      || (flag_sel_sched_pipelining_outer_loops
4861		  && current_loop_nest));
4862}
4863
4864/* Find all predecessors of BB and record them in PREDS and their number
4865   in N.  Empty blocks are skipped, and only normal (forward in-region)
4866   edges are processed.  */
4867static void
4868cfg_preds (basic_block bb, insn_t **preds, int *n)
4869{
4870  int size = 0;
4871
4872  *preds = NULL;
4873  *n = 0;
4874  cfg_preds_1 (bb, preds, n, &size);
4875}
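
/* Note: the PREDS array is heap-allocated with XRESIZEVEC, so a caller must
   free it when done, as get_seqno_for_a_jump does:

       insn_t *preds;
       int n;

       cfg_preds (bb, &preds, &n);
       ... use preds[0] .. preds[n - 1] ...
       free (preds);

   PREDS may be empty only when pipelining outer loops (see the assert in
   cfg_preds_1).  */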
4876
4877/* Returns true if we are moving INSN through a join point.  */
4878bool
4879sel_num_cfg_preds_gt_1 (insn_t insn)
4880{
4881  basic_block bb;
4882
4883  if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4884    return false;
4885
4886  bb = BLOCK_FOR_INSN (insn);
4887
4888  while (1)
4889    {
4890      if (EDGE_COUNT (bb->preds) > 1)
4891	return true;
4892
4893      gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4894      bb = EDGE_PRED (bb, 0)->src;
4895
4896      if (!sel_bb_empty_p (bb))
4897	break;
4898    }
4899
4900  return false;
4901}
4902
4903/* Returns true when BB should be the end of an ebb.  Adapted from the
4904   code in sched-ebb.c.  */
4905bool
4906bb_ends_ebb_p (basic_block bb)
4907{
4908  basic_block next_bb = bb_next_bb (bb);
4909  edge e;
4910
4911  if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4912      || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4913      || (LABEL_P (BB_HEAD (next_bb))
4914	  /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4915	     Work around that.  */
4916	  && !single_pred_p (next_bb)))
4917    return true;
4918
4919  if (!in_current_region_p (next_bb))
4920    return true;
4921
4922  e = find_fallthru_edge (bb->succs);
4923  if (e)
4924    {
4925      gcc_assert (e->dest == next_bb);
4926
4927      return false;
4928    }
4929
4930  return true;
4931}
4932
4933/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4934   successor of INSN.  */
4935bool
4936in_same_ebb_p (insn_t insn, insn_t succ)
4937{
4938  basic_block ptr = BLOCK_FOR_INSN (insn);
4939
4940  for (;;)
4941    {
4942      if (ptr == BLOCK_FOR_INSN (succ))
4943        return true;
4944
4945      if (bb_ends_ebb_p (ptr))
4946        return false;
4947
4948      ptr = bb_next_bb (ptr);
4949    }
4950
4951  gcc_unreachable ();
4952  return false;
4953}
4954
4955/* Recomputes the reverse topological order for the function and
4956   saves it in REV_TOP_ORDER_INDEX.  REV_TOP_ORDER_INDEX_LEN is also
4957   modified appropriately.  */
4958static void
4959recompute_rev_top_order (void)
4960{
4961  int *postorder;
4962  int n_blocks, i;
4963
4964  if (!rev_top_order_index
4965      || rev_top_order_index_len < last_basic_block_for_fn (cfun))
4966    {
4967      rev_top_order_index_len = last_basic_block_for_fn (cfun);
4968      rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4969                                        rev_top_order_index_len);
4970    }
4971
4972  postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
4973
4974  n_blocks = post_order_compute (postorder, true, false);
4975  gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
4976
4977  /* Build reverse function: for each basic block with BB->INDEX == K
4978     rev_top_order_index[K] is its reverse topological sort number.  */
4979  for (i = 0; i < n_blocks; i++)
4980    {
4981      gcc_assert (postorder[i] < rev_top_order_index_len);
4982      rev_top_order_index[postorder[i]] = i;
4983    }
4984
4985  free (postorder);
4986}
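
/* Example with made-up block indices: if post_order_compute stores 6, 4, 2
   into POSTORDER, the loop above sets rev_top_order_index[6] = 0,
   rev_top_order_index[4] = 1 and rev_top_order_index[2] = 2, so comparing
   two entries compares the blocks' positions in the post order.  */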
4987
4988/* Clear all flags from insns in BB that could spoil its rescheduling.  */
4989void
4990clear_outdated_rtx_info (basic_block bb)
4991{
4992  rtx_insn *insn;
4993
4994  FOR_BB_INSNS (bb, insn)
4995    if (INSN_P (insn))
4996      {
4997	SCHED_GROUP_P (insn) = 0;
4998	INSN_AFTER_STALL_P (insn) = 0;
4999	INSN_SCHED_TIMES (insn) = 0;
5000	EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
5001
5002        /* We cannot use the changed caches, as previously we could ignore
5003           the LHS dependence due to enabled renaming and transform
5004           the expression, and currently we'll be unable to do this.  */
5005        htab_empty (INSN_TRANSFORMED_INSNS (insn));
5006      }
5007}
5008
5009/* Return the bb note of BB to the pool of available basic block notes.  */
5010static void
5011return_bb_to_pool (basic_block bb)
5012{
5013  rtx note = bb_note (bb);
5014
5015  gcc_assert (NOTE_BASIC_BLOCK (note) == bb
5016	      && bb->aux == NULL);
5017
5018  /* It turns out that the current cfg infrastructure does not support
5019     reuse of basic blocks.  Don't bother for now.  */
5020  /*bb_note_pool.safe_push (note);*/
5021}
5022
5023/* Get a bb_note from pool or return NULL_RTX if pool is empty.  */
5024static rtx_note *
5025get_bb_note_from_pool (void)
5026{
5027  if (bb_note_pool.is_empty ())
5028    return NULL;
5029  else
5030    {
5031      rtx_note *note = bb_note_pool.pop ();
5032
5033      SET_PREV_INSN (note) = NULL_RTX;
5034      SET_NEXT_INSN (note) = NULL_RTX;
5035
5036      return note;
5037    }
5038}
5039
5040/* Free bb_note_pool.  */
5041void
5042free_bb_note_pool (void)
5043{
5044  bb_note_pool.release ();
5045}
5046
5047/* Setup scheduler pool and successor structure.  */
5048void
5049alloc_sched_pools (void)
5050{
5051  int succs_size;
5052
5053  succs_size = MAX_WS + 1;
5054  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5055  succs_info_pool.size = succs_size;
5056  succs_info_pool.top = -1;
5057  succs_info_pool.max_top = -1;
5058
5059  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
5060                                        sizeof (struct _list_node), 500);
5061}
5062
5063/* Free the pools.  */
5064void
5065free_sched_pools (void)
5066{
5067  int i;
5068
5069  free_alloc_pool (sched_lists_pool);
5070  gcc_assert (succs_info_pool.top == -1);
5071  for (i = 0; i <= succs_info_pool.max_top; i++)
5072    {
5073      succs_info_pool.stack[i].succs_ok.release ();
5074      succs_info_pool.stack[i].succs_other.release ();
5075      succs_info_pool.stack[i].probs_ok.release ();
5076    }
5077  free (succs_info_pool.stack);
5078}
5079
5080
5081/* Returns a position in RGN where BB can be inserted retaining
5082   topological order.  */
5083static int
5084find_place_to_insert_bb (basic_block bb, int rgn)
5085{
5086  bool has_preds_outside_rgn = false;
5087  edge e;
5088  edge_iterator ei;
5089
5090  /* Find whether we have preds outside the region.  */
5091  FOR_EACH_EDGE (e, ei, bb->preds)
5092    if (!in_current_region_p (e->src))
5093      {
5094        has_preds_outside_rgn = true;
5095        break;
5096      }
5097
5098  /* Recompute the topological order -- needed when we have more than one
5099     pred, and also when we have preds outside the region.  */
5100  if (flag_sel_sched_pipelining_outer_loops
5101      && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5102    {
5103      int i, bbi = bb->index, cur_bbi;
5104
5105      recompute_rev_top_order ();
5106      for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5107        {
5108          cur_bbi = BB_TO_BLOCK (i);
5109          if (rev_top_order_index[bbi]
5110              < rev_top_order_index[cur_bbi])
5111            break;
5112        }
5113
5114      /* We went one block past the right place, so the answer is I + 1;
5115         the caller will increment the result itself, so compensate here.  */
5116      return (i + 1) - 1;
5117    }
5118  else if (has_preds_outside_rgn)
5119    {
5120      /* This is the case when we generate an extra empty block
5121         to serve as region head during pipelining.  */
5122      e = EDGE_SUCC (bb, 0);
5123      gcc_assert (EDGE_COUNT (bb->succs) == 1
5124                  && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5125                  && (BLOCK_TO_BB (e->dest->index) == 0));
5126      return -1;
5127    }
5128
5129  /* We don't have preds outside the region.  There should be a single
5130     pred, because the multiple-preds case comes from the pipelining of
5131     outer loops, and that is handled above.
5132     Just take the bbi of this single pred.  */
5133  if (EDGE_COUNT (bb->succs) > 0)
5134    {
5135      int pred_bbi;
5136
5137      gcc_assert (EDGE_COUNT (bb->preds) == 1);
5138
5139      pred_bbi = EDGE_PRED (bb, 0)->src->index;
5140      return BLOCK_TO_BB (pred_bbi);
5141    }
5142  else
5143    /* BB has no successors.  It is safe to put it in the end.  */
5144    return current_nr_blocks - 1;
5145}
5146
5147/* Deletes an empty basic block freeing its data.  */
5148static void
5149delete_and_free_basic_block (basic_block bb)
5150{
5151  gcc_assert (sel_bb_empty_p (bb));
5152
5153  if (BB_LV_SET (bb))
5154    free_lv_set (bb);
5155
5156  bitmap_clear_bit (blocks_to_reschedule, bb->index);
5157
5158  /* Can't assert av_set properties because we use sel_remove_bb
5159     when removing the loop preheader from the region.  At the point of
5160     removing the preheader we have already deallocated sel_region_bb_info.  */
5161  gcc_assert (BB_LV_SET (bb) == NULL
5162              && !BB_LV_SET_VALID_P (bb)
5163              && BB_AV_LEVEL (bb) == 0
5164              && BB_AV_SET (bb) == NULL);
5165
5166  delete_basic_block (bb);
5167}
5168
5169/* Add BB to the current region and update the region data.  */
5170static void
5171add_block_to_current_region (basic_block bb)
5172{
5173  int i, pos, bbi = -2, rgn;
5174
5175  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5176  bbi = find_place_to_insert_bb (bb, rgn);
5177  bbi += 1;
5178  pos = RGN_BLOCKS (rgn) + bbi;
5179
5180  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5181              && ebb_head[bbi] == pos);
5182
5183  /* Make a place for the new block.  */
5184  extend_regions ();
5185
5186  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5187    BLOCK_TO_BB (rgn_bb_table[i])++;
5188
5189  memmove (rgn_bb_table + pos + 1,
5190           rgn_bb_table + pos,
5191           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5192
5193  /* Initialize data for BB.  */
5194  rgn_bb_table[pos] = bb->index;
5195  BLOCK_TO_BB (bb->index) = bbi;
5196  CONTAINING_RGN (bb->index) = rgn;
5197
5198  RGN_NR_BLOCKS (rgn)++;
5199
5200  for (i = rgn + 1; i <= nr_regions; i++)
5201    RGN_BLOCKS (i)++;
5202}
5203
5204/* Remove BB from the current region and update the region data.  */
5205static void
5206remove_bb_from_region (basic_block bb)
5207{
5208  int i, pos, bbi = -2, rgn;
5209
5210  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5211  bbi = BLOCK_TO_BB (bb->index);
5212  pos = RGN_BLOCKS (rgn) + bbi;
5213
5214  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5215              && ebb_head[bbi] == pos);
5216
5217  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5218    BLOCK_TO_BB (rgn_bb_table[i])--;
5219
5220  memmove (rgn_bb_table + pos,
5221           rgn_bb_table + pos + 1,
5222           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5223
5224  RGN_NR_BLOCKS (rgn)--;
5225  for (i = rgn + 1; i <= nr_regions; i++)
5226    RGN_BLOCKS (i)--;
5227}
5228
5229/* Add BB to the current region  and update all data.  If BB is NULL, add all
5230   blocks from last_added_blocks vector.  */
5231static void
5232sel_add_bb (basic_block bb)
5233{
5234  /* Extend luids so that new notes will receive zero luids.  */
5235  sched_extend_luids ();
5236  sched_init_bbs ();
5237  sel_init_bbs (last_added_blocks);
5238
5239  /* When BB is passed explicitly, the vector should contain
5240     a single element equal to BB; otherwise, the vector
5241     should not be empty.  */
5242  gcc_assert (last_added_blocks.exists ());
5243
5244  if (bb != NULL)
5245    {
5246      gcc_assert (last_added_blocks.length () == 1
5247                  && last_added_blocks[0] == bb);
5248      add_block_to_current_region (bb);
5249
5250      /* We associate creating/deleting data sets with the first insn
5251         appearing / disappearing in the bb.  */
5252      if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5253	create_initial_data_sets (bb);
5254
5255      last_added_blocks.release ();
5256    }
5257  else
5258    /* BB is NULL - process LAST_ADDED_BLOCKS instead.  */
5259    {
5260      int i;
5261      basic_block temp_bb = NULL;
5262
5263      for (i = 0;
5264           last_added_blocks.iterate (i, &bb); i++)
5265        {
5266          add_block_to_current_region (bb);
5267          temp_bb = bb;
5268        }
5269
5270      /* We need to fetch at least one bb so we know the region
5271         to update.  */
5272      gcc_assert (temp_bb != NULL);
5273      bb = temp_bb;
5274
5275      last_added_blocks.release ();
5276    }
5277
5278  rgn_setup_region (CONTAINING_RGN (bb->index));
5279}
5280
5281/* Remove BB from the current region and update all data.
5282   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
5283static void
5284sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5285{
5286  unsigned idx = bb->index;
5287
5288  gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5289
5290  remove_bb_from_region (bb);
5291  return_bb_to_pool (bb);
5292  bitmap_clear_bit (blocks_to_reschedule, idx);
5293
5294  if (remove_from_cfg_p)
5295    {
5296      basic_block succ = single_succ (bb);
5297      delete_and_free_basic_block (bb);
5298      set_immediate_dominator (CDI_DOMINATORS, succ,
5299                               recompute_dominator (CDI_DOMINATORS, succ));
5300    }
5301
5302  rgn_setup_region (CONTAINING_RGN (idx));
5303}
5304
5305/* Concatenate info of EMPTY_BB to info of MERGE_BB.  */
5306static void
5307move_bb_info (basic_block merge_bb, basic_block empty_bb)
5308{
5309  if (in_current_region_p (merge_bb))
5310    concat_note_lists (BB_NOTE_LIST (empty_bb),
5311		       &BB_NOTE_LIST (merge_bb));
5312  BB_NOTE_LIST (empty_bb) = NULL;
5313
5314}
5315
5316/* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5317   region, but keep it in CFG.  */
5318static void
5319remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5320{
5321  /* The block should contain just a note or a label.
5322     We try to check whether it is unused below.  */
5323  gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5324              || LABEL_P (BB_HEAD (empty_bb)));
5325
5326  /* If basic block has predecessors or successors, redirect them.  */
5327  if (remove_from_cfg_p
5328      && (EDGE_COUNT (empty_bb->preds) > 0
5329	  || EDGE_COUNT (empty_bb->succs) > 0))
5330    {
5331      basic_block pred;
5332      basic_block succ;
5333
5334      /* We need to init PRED and SUCC before redirecting edges.  */
5335      if (EDGE_COUNT (empty_bb->preds) > 0)
5336	{
5337	  edge e;
5338
5339	  gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5340
5341	  e = EDGE_PRED (empty_bb, 0);
5342          gcc_assert (e->src == empty_bb->prev_bb
5343		      && (e->flags & EDGE_FALLTHRU));
5344
5345	  pred = empty_bb->prev_bb;
5346	}
5347      else
5348	pred = NULL;
5349
5350      if (EDGE_COUNT (empty_bb->succs) > 0)
5351	{
5352          /* We do not check fallthruness here as above, because
5353             after removing a jump the edge may not actually be fallthru.  */
5354	  gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5355	  succ = EDGE_SUCC (empty_bb, 0)->dest;
5356	}
5357      else
5358	succ = NULL;
5359
5360      if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5361        {
5362          edge e = EDGE_PRED (empty_bb, 0);
5363
5364          if (e->flags & EDGE_FALLTHRU)
5365            redirect_edge_succ_nodup (e, succ);
5366          else
5367            sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5368        }
5369
5370      if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5371	{
5372	  edge e = EDGE_SUCC (empty_bb, 0);
5373
5374	  if (find_edge (pred, e->dest) == NULL)
5375	    redirect_edge_pred (e, pred);
5376	}
5377    }
5378
5379  /* Finish removing.  */
5380  sel_remove_bb (empty_bb, remove_from_cfg_p);
5381}
5382
5383/* An implementation of create_basic_block hook, which additionally updates
5384   per-bb data structures.  */
5385static basic_block
5386sel_create_basic_block (void *headp, void *endp, basic_block after)
5387{
5388  basic_block new_bb;
5389  rtx_note *new_bb_note;
5390
5391  gcc_assert (flag_sel_sched_pipelining_outer_loops
5392              || !last_added_blocks.exists ());
5393
5394  new_bb_note = get_bb_note_from_pool ();
5395
5396  if (new_bb_note == NULL_RTX)
5397    new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5398  else
5399    {
5400      new_bb = create_basic_block_structure ((rtx_insn *) headp,
5401					     (rtx_insn *) endp,
5402					     new_bb_note, after);
5403      new_bb->aux = NULL;
5404    }
5405
5406  last_added_blocks.safe_push (new_bb);
5407
5408  return new_bb;
5409}
5410
5411/* Implement sched_init_only_bb ().  */
5412static void
5413sel_init_only_bb (basic_block bb, basic_block after)
5414{
5415  gcc_assert (after == NULL);
5416
5417  extend_regions ();
5418  rgn_make_new_region_out_of_new_block (bb);
5419}
5420
5421/* Update the latch when we've split or merged it from block FROM to TO.
5422   This should be checked for all outer loops, too.  */
5423static void
5424change_loops_latches (basic_block from, basic_block to)
5425{
5426  gcc_assert (from != to);
5427
5428  if (current_loop_nest)
5429    {
5430      struct loop *loop;
5431
5432      for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5433        if (considered_for_pipelining_p (loop) && loop->latch == from)
5434          {
5435            gcc_assert (loop == current_loop_nest);
5436            loop->latch = to;
5437            gcc_assert (loop_latch_edge (loop));
5438          }
5439    }
5440}
5441
5442/* Splits BB into two basic blocks, adding the new block to the region and
5443   extending per-bb data structures.  Returns the newly created bb.  */
5444static basic_block
5445sel_split_block (basic_block bb, rtx after)
5446{
5447  basic_block new_bb;
5448  insn_t insn;
5449
5450  new_bb = sched_split_block_1 (bb, after);
5451  sel_add_bb (new_bb);
5452
5453  /* This should be called after sel_add_bb, because this uses
5454     CONTAINING_RGN for the new block, which is not yet initialized.
5455     FIXME: this function may be a no-op now.  */
5456  change_loops_latches (bb, new_bb);
5457
5458  /* Update ORIG_BB_INDEX for insns moved into the new block.  */
5459  FOR_BB_INSNS (new_bb, insn)
5460   if (INSN_P (insn))
5461     EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5462
5463  if (sel_bb_empty_p (bb))
5464    {
5465      gcc_assert (!sel_bb_empty_p (new_bb));
5466
5467      /* NEW_BB has data sets that need to be updated and BB holds
5468	 data sets that should be removed.  Exchange these data sets
5469	 so that we won't lose BB's valid data sets.  */
5470      exchange_data_sets (new_bb, bb);
5471      free_data_sets (bb);
5472    }
5473
5474  if (!sel_bb_empty_p (new_bb)
5475      && bitmap_bit_p (blocks_to_reschedule, bb->index))
5476    bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5477
5478  return new_bb;
5479}
5480
5481/* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5482   Otherwise returns NULL.  */
5483static rtx_insn *
5484check_for_new_jump (basic_block bb, int prev_max_uid)
5485{
5486  rtx_insn *end;
5487
5488  end = sel_bb_end (bb);
5489  if (end && INSN_UID (end) >= prev_max_uid)
5490    return end;
5491  return NULL;
5492}
5493
5494/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5495   New means having UID at least equal to PREV_MAX_UID.  */
5496static rtx_insn *
5497find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5498{
5499  rtx_insn *jump;
5500
5501  /* Return immediately if no new insns were emitted.  */
5502  if (get_max_uid () == prev_max_uid)
5503    return NULL;
5504
5505  /* Now check both blocks for new jumps.  There will only ever be one.  */
5506  if ((jump = check_for_new_jump (from, prev_max_uid)))
5507    return jump;
5508
5509  if (jump_bb != NULL
5510      && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5511    return jump;
5512  return NULL;
5513}
5514
5515/* Splits E and adds the newly created basic block to the current region.
5516   Returns this basic block.  */
5517basic_block
5518sel_split_edge (edge e)
5519{
5520  basic_block new_bb, src, other_bb = NULL;
5521  int prev_max_uid;
5522  rtx_insn *jump;
5523
5524  src = e->src;
5525  prev_max_uid = get_max_uid ();
5526  new_bb = split_edge (e);
5527
5528  if (flag_sel_sched_pipelining_outer_loops
5529      && current_loop_nest)
5530    {
5531      int i;
5532      basic_block bb;
5533
5534      /* Some of the basic blocks might not have been added to the loop.
5535         Add them here, until this is fixed in force_fallthru.  */
5536      for (i = 0;
5537           last_added_blocks.iterate (i, &bb); i++)
5538        if (!bb->loop_father)
5539          {
5540            add_bb_to_loop (bb, e->dest->loop_father);
5541
5542            gcc_assert (!other_bb && (new_bb->index != bb->index));
5543            other_bb = bb;
5544          }
5545    }
5546
5547  /* Add all last_added_blocks to the region.  */
5548  sel_add_bb (NULL);
5549
5550  jump = find_new_jump (src, new_bb, prev_max_uid);
5551  if (jump)
5552    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5553
5554  /* Put the correct lv set on this block.  */
5555  if (other_bb && !sel_bb_empty_p (other_bb))
5556    compute_live (sel_bb_head (other_bb));
5557
5558  return new_bb;
5559}
5560
5561/* Implement sched_create_empty_bb ().  */
5562static basic_block
5563sel_create_empty_bb (basic_block after)
5564{
5565  basic_block new_bb;
5566
5567  new_bb = sched_create_empty_bb_1 (after);
5568
5569  /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5570     later.  */
5571  gcc_assert (last_added_blocks.length () == 1
5572	      && last_added_blocks[0] == new_bb);
5573
5574  last_added_blocks.release ();
5575  return new_bb;
5576}
5577
5578/* Implement sched_create_recovery_block.  ORIG_INSN is where the block
5579   will be split to insert a check.  */
5580basic_block
5581sel_create_recovery_block (insn_t orig_insn)
5582{
5583  basic_block first_bb, second_bb, recovery_block;
5584  basic_block before_recovery = NULL;
5585  rtx_insn *jump;
5586
5587  first_bb = BLOCK_FOR_INSN (orig_insn);
5588  if (sel_bb_end_p (orig_insn))
5589    {
5590      /* Avoid introducing an empty block while splitting.  */
5591      gcc_assert (single_succ_p (first_bb));
5592      second_bb = single_succ (first_bb);
5593    }
5594  else
5595    second_bb = sched_split_block (first_bb, orig_insn);
5596
5597  recovery_block = sched_create_recovery_block (&before_recovery);
5598  if (before_recovery)
5599    copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
5600
5601  gcc_assert (sel_bb_empty_p (recovery_block));
5602  sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5603  if (current_loops != NULL)
5604    add_bb_to_loop (recovery_block, first_bb->loop_father);
5605
5606  sel_add_bb (recovery_block);
5607
5608  jump = BB_END (recovery_block);
5609  gcc_assert (sel_bb_head (recovery_block) == jump);
5610  sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5611
5612  return recovery_block;
5613}
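
/* For illustration: after sel_create_recovery_block the CFG around ORIG_INSN
   looks roughly like this (a sketch, not an exact dump):

	first_bb ----------------> second_bb
	    \                          ^
	     `--> recovery_block ------'

   FIRST_BB ends at ORIG_INSN, RECOVERY_BLOCK initially contains only the
   jump back to SECOND_BB (as asserted above), and the actual speculation
   check and recovery code are emitted by the caller.  */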
5614
5615/* Merge basic block B into basic block A.  */
5616static void
5617sel_merge_blocks (basic_block a, basic_block b)
5618{
5619  gcc_assert (sel_bb_empty_p (b)
5620              && EDGE_COUNT (b->preds) == 1
5621              && EDGE_PRED (b, 0)->src == b->prev_bb);
5622
5623  move_bb_info (b->prev_bb, b);
5624  remove_empty_bb (b, false);
5625  merge_blocks (a, b);
5626  change_loops_latches (b, a);
5627}
5628
5629/* A wrapper for redirect_edge_and_branch_force, which also initializes
5630   data structures for possibly created bb and insns.  */
5631void
5632sel_redirect_edge_and_branch_force (edge e, basic_block to)
5633{
5634  basic_block jump_bb, src, orig_dest = e->dest;
5635  int prev_max_uid;
5636  rtx_insn *jump;
5637  int old_seqno = -1;
5638
5639  /* This function is now used only for bookkeeping code creation, where
5640     ORIG_DEST never has a single predecessor, so redirecting the edge
5641     cannot make any block unreachable when updating dominator info.  */
5642  gcc_assert (!sel_bb_empty_p (e->src)
5643              && !single_pred_p (orig_dest));
5644  src = e->src;
5645  prev_max_uid = get_max_uid ();
5646  /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5647     when the conditional jump being redirected may become unconditional.  */
5648  if (any_condjump_p (BB_END (src))
5649      && INSN_SEQNO (BB_END (src)) >= 0)
5650    old_seqno = INSN_SEQNO (BB_END (src));
5651
5652  jump_bb = redirect_edge_and_branch_force (e, to);
5653  if (jump_bb != NULL)
5654    sel_add_bb (jump_bb);
5655
5656  /* As used now, this function cannot break the loop structure, so there
5657     is nothing to update here.  But assert the latch edge to be sure.  */
5658  if (current_loop_nest
5659      && pipelining_p)
5660    gcc_assert (loop_latch_edge (current_loop_nest));
5661
5662  jump = find_new_jump (src, jump_bb, prev_max_uid);
5663  if (jump)
5664    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
5665		       old_seqno);
5666  set_immediate_dominator (CDI_DOMINATORS, to,
5667			   recompute_dominator (CDI_DOMINATORS, to));
5668  set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5669			   recompute_dominator (CDI_DOMINATORS, orig_dest));
5670}
5671
5672/* A wrapper for redirect_edge_and_branch.  Return TRUE if the redirected edge
5673   now goes against the region's topological order, which must be recomputed.  */
5674bool
5675sel_redirect_edge_and_branch (edge e, basic_block to)
5676{
5677  bool latch_edge_p;
5678  basic_block src, orig_dest = e->dest;
5679  int prev_max_uid;
5680  rtx_insn *jump;
5681  edge redirected;
5682  bool recompute_toporder_p = false;
5683  bool maybe_unreachable = single_pred_p (orig_dest);
5684  int old_seqno = -1;
5685
5686  latch_edge_p = (pipelining_p
5687                  && current_loop_nest
5688                  && e == loop_latch_edge (current_loop_nest));
5689
5690  src = e->src;
5691  prev_max_uid = get_max_uid ();
5692
5693  /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5694     when the conditional jump being redirected may become unconditional.  */
5695  if (any_condjump_p (BB_END (src))
5696      && INSN_SEQNO (BB_END (src)) >= 0)
5697    old_seqno = INSN_SEQNO (BB_END (src));
5698
5699  redirected = redirect_edge_and_branch (e, to);
5700
5701  gcc_assert (redirected && !last_added_blocks.exists ());
5702
5703  /* When we've redirected a latch edge, update the header.  */
5704  if (latch_edge_p)
5705    {
5706      current_loop_nest->header = to;
5707      gcc_assert (loop_latch_edge (current_loop_nest));
5708    }
5709
5710  /* In rare situations, the topological relation between the blocks connected
5711     by the redirected edge can change (see PR42245 for an example).  Update
5712     block_to_bb/bb_to_block.  */
5713  if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5714      && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5715    recompute_toporder_p = true;
5716
5717  jump = find_new_jump (src, NULL, prev_max_uid);
5718  if (jump)
5719    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno);
5720
5721  /* Only update dominator info when we don't have unreachable blocks.
5722     Otherwise we'll update in maybe_tidy_empty_bb.  */
5723  if (!maybe_unreachable)
5724    {
5725      set_immediate_dominator (CDI_DOMINATORS, to,
5726                               recompute_dominator (CDI_DOMINATORS, to));
5727      set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5728                               recompute_dominator (CDI_DOMINATORS, orig_dest));
5729    }
5730  return recompute_toporder_p;
5731}
5732
5733/* This variable holds the cfg hooks used by the selective scheduler.  */
5734static struct cfg_hooks sel_cfg_hooks;
5735
5736/* Register sel-sched cfg hooks.  */
5737void
5738sel_register_cfg_hooks (void)
5739{
5740  sched_split_block = sel_split_block;
5741
5742  orig_cfg_hooks = get_cfg_hooks ();
5743  sel_cfg_hooks = orig_cfg_hooks;
5744
5745  sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5746
5747  set_cfg_hooks (sel_cfg_hooks);
5748
5749  sched_init_only_bb = sel_init_only_bb;
5750  sched_split_block = sel_split_block;
5751  sched_create_empty_bb = sel_create_empty_bb;
5752}
5753
5754/* Unregister sel-sched cfg hooks.  */
5755void
5756sel_unregister_cfg_hooks (void)
5757{
5758  sched_create_empty_bb = NULL;
5759  sched_split_block = NULL;
5760  sched_init_only_bb = NULL;
5761
5762  set_cfg_hooks (orig_cfg_hooks);
5763}
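
/* For illustration, the two functions above are meant to bracket scheduler
   code that may modify the CFG (a sketch):

     sel_register_cfg_hooks ();
     ... transformations that may split blocks or edges ...
     sel_unregister_cfg_hooks ();

   While the hooks are registered, newly created basic blocks go through
   sel_create_basic_block instead of the generic hook.  */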
5764
5765
5766/* Emit an insn rtx based on PATTERN.  If a jump insn is wanted, LABEL is
5767   where it should be directed; if LABEL is a debug insn, emit a debug insn.  */
5768rtx_insn *
5769create_insn_rtx_from_pattern (rtx pattern, rtx label)
5770{
5771  rtx_insn *insn_rtx;
5772
5773  gcc_assert (!INSN_P (pattern));
5774
5775  start_sequence ();
5776
5777  if (label == NULL_RTX)
5778    insn_rtx = emit_insn (pattern);
5779  else if (DEBUG_INSN_P (label))
5780    insn_rtx = emit_debug_insn (pattern);
5781  else
5782    {
5783      insn_rtx = emit_jump_insn (pattern);
5784      JUMP_LABEL (insn_rtx) = label;
5785      ++LABEL_NUSES (label);
5786    }
5787
5788  end_sequence ();
5789
5790  sched_extend_luids ();
5791  sched_extend_target ();
5792  sched_deps_init (false);
5793
5794  /* Initialize INSN_CODE now.  */
5795  recog_memoized (insn_rtx);
5796  return insn_rtx;
5797}
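
/* For illustration, a typical use of create_insn_rtx_from_pattern is to
   clone the pattern of an existing insn into a fresh insn rtx, as done in
   create_copy_of_insn_rtx below:

     rtx_insn *copy
       = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn)), NULL_RTX);

   Passing a debug insn as LABEL is the special case used above to emit a
   debug insn rather than a jump.  */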
5798
5799/* Create a new vinsn for INSN_RTX.  FORCE_UNIQUE_P is true when the vinsn
5800   must not be clonable.  */
5801vinsn_t
5802create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
5803{
5804  gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5805
5806  /* If VINSN_TYPE is not USE, retain its uniqueness.  */
5807  return vinsn_create (insn_rtx, force_unique_p);
5808}
5809
5810/* Create a copy of INSN_RTX.  */
5811rtx_insn *
5812create_copy_of_insn_rtx (rtx insn_rtx)
5813{
5814  rtx_insn *res;
5815  rtx link;
5816
5817  if (DEBUG_INSN_P (insn_rtx))
5818    return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5819					 insn_rtx);
5820
5821  gcc_assert (NONJUMP_INSN_P (insn_rtx));
5822
5823  res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5824                                      NULL_RTX);
5825
5826  /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5827     since mark_jump_label will make them.  REG_LABEL_TARGETs are created
5828     there too, but are supposed to be sticky, so we copy them.  */
5829  for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5830    if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5831	&& REG_NOTE_KIND (link) != REG_EQUAL
5832	&& REG_NOTE_KIND (link) != REG_EQUIV)
5833      {
5834	if (GET_CODE (link) == EXPR_LIST)
5835	  add_reg_note (res, REG_NOTE_KIND (link),
5836			copy_insn_1 (XEXP (link, 0)));
5837	else
5838	  add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5839      }
5840
5841  return res;
5842}
5843
5844/* Change vinsn field of EXPR to hold NEW_VINSN.  */
5845void
5846change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5847{
5848  vinsn_detach (EXPR_VINSN (expr));
5849
5850  EXPR_VINSN (expr) = new_vinsn;
5851  vinsn_attach (new_vinsn);
5852}
5853
5854/* Helpers for global init.  */
5855/* This structure is used so that we can call the existing bundling
5856   mechanism and calculate insn priorities.  */
5857static struct haifa_sched_info sched_sel_haifa_sched_info =
5858{
5859  NULL, /* init_ready_list */
5860  NULL, /* can_schedule_ready_p */
5861  NULL, /* schedule_more_p */
5862  NULL, /* new_ready */
5863  NULL, /* rgn_rank */
5864  sel_print_insn, /* rgn_print_insn */
5865  contributes_to_priority,
5866  NULL, /* insn_finishes_block_p */
5867
5868  NULL, NULL,
5869  NULL, NULL,
5870  0, 0,
5871
5872  NULL, /* add_remove_insn */
5873  NULL, /* begin_schedule_ready */
5874  NULL, /* begin_move_insn */
5875  NULL, /* advance_target_bb */
5876
5877  NULL,
5878  NULL,
5879
5880  SEL_SCHED | NEW_BBS
5881};
5882
5883/* Setup special insns used in the scheduler.  */
5884void
5885setup_nop_and_exit_insns (void)
5886{
5887  gcc_assert (nop_pattern == NULL_RTX
5888	      && exit_insn == NULL_RTX);
5889
5890  nop_pattern = constm1_rtx;
5891
5892  start_sequence ();
5893  emit_insn (nop_pattern);
5894  exit_insn = get_insns ();
5895  end_sequence ();
5896  set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
5897}
5898
5899/* Free special insns used in the scheduler.  */
5900void
5901free_nop_and_exit_insns (void)
5902{
5903  exit_insn = NULL;
5904  nop_pattern = NULL_RTX;
5905}
5906
5907/* Setup a special vinsn used in new insns initialization.  */
5908void
5909setup_nop_vinsn (void)
5910{
5911  nop_vinsn = vinsn_create (exit_insn, false);
5912  vinsn_attach (nop_vinsn);
5913}
5914
5915/* Free a special vinsn used in new insns initialization.  */
5916void
5917free_nop_vinsn (void)
5918{
5919  gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5920  vinsn_detach (nop_vinsn);
5921  nop_vinsn = NULL;
5922}
5923
5924/* Call a set_sched_flags hook.  */
5925void
5926sel_set_sched_flags (void)
5927{
5928  /* ??? This means that set_sched_flags was called, and we decided to
5929     support speculation.  However, set_sched_flags also modifies flags
5930     on current_sched_info, and does so only at global init.  Since we
5931     sometimes change current_sched_info later, set the correct flags again.  */
5932  if (spec_info && targetm.sched.set_sched_flags)
5933    targetm.sched.set_sched_flags (spec_info);
5934}
5935
5936/* Setup pointers to global sched info structures.  */
5937void
5938sel_setup_sched_infos (void)
5939{
5940  rgn_setup_common_sched_info ();
5941
5942  memcpy (&sel_common_sched_info, common_sched_info,
5943	  sizeof (sel_common_sched_info));
5944
5945  sel_common_sched_info.fix_recovery_cfg = NULL;
5946  sel_common_sched_info.add_block = NULL;
5947  sel_common_sched_info.estimate_number_of_insns
5948    = sel_estimate_number_of_insns;
5949  sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5950  sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5951
5952  common_sched_info = &sel_common_sched_info;
5953
5954  current_sched_info = &sched_sel_haifa_sched_info;
5955  current_sched_info->sched_max_insns_priority =
5956    get_rgn_sched_max_insns_priority ();
5957
5958  sel_set_sched_flags ();
5959}
5960
5961
5962/* Add basic block BB to region RGN at the position *BB_ORD_INDEX,
5963   then increment *BB_ORD_INDEX.  */
5964static void
5965sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5966{
5967  RGN_NR_BLOCKS (rgn) += 1;
5968  RGN_DONT_CALC_DEPS (rgn) = 0;
5969  RGN_HAS_REAL_EBB (rgn) = 0;
5970  CONTAINING_RGN (bb->index) = rgn;
5971  BLOCK_TO_BB (bb->index) = *bb_ord_index;
5972  rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5973  (*bb_ord_index)++;
5974
5975  /* FIXME: this is correct only when EBBs are not being scheduled.  */
5976  RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5977}
5978
5979/* Functions to support pipelining of outer loops.  */
5980
5981/* Creates a new empty region and returns its number.  */
5982static int
5983sel_create_new_region (void)
5984{
5985  int new_rgn_number = nr_regions;
5986
5987  RGN_NR_BLOCKS (new_rgn_number) = 0;
5988
5989  /* FIXME: This will work only when EBBs are not created.  */
5990  if (new_rgn_number != 0)
5991    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5992      RGN_NR_BLOCKS (new_rgn_number - 1);
5993  else
5994    RGN_BLOCKS (new_rgn_number) = 0;
5995
5996  /* Set the start of the next region's blocks so that other functions can
5997     calculate the number of blocks in this region.  */
5998  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5999    RGN_NR_BLOCKS (new_rgn_number);
6000
6001  nr_regions++;
6002
6003  return new_rgn_number;
6004}
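
/* For illustration (a sketch, assuming EBBs are not created): with two
   regions of three and two blocks, the bookkeeping maintained by
   sel_create_new_region and sel_add_block_to_region looks like

     rgn_bb_table:   b0 b1 b2 | b3 b4
     RGN_BLOCKS:     0          3        (5 recorded as the next region's start)
     RGN_NR_BLOCKS:  3          2

   i.e. RGN_BLOCKS (rgn) indexes the first block of RGN in rgn_bb_table, and
   RGN_BLOCKS (rgn + 1) - RGN_BLOCKS (rgn) equals RGN_NR_BLOCKS (rgn).  */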
6005
6006/* If X has a smaller topological sort number than Y, returns -1;
6007   if greater, returns 1.  */
6008static int
6009bb_top_order_comparator (const void *x, const void *y)
6010{
6011  basic_block bb1 = *(const basic_block *) x;
6012  basic_block bb2 = *(const basic_block *) y;
6013
6014  gcc_assert (bb1 == bb2
6015	      || rev_top_order_index[bb1->index]
6016		 != rev_top_order_index[bb2->index]);
6017
6018  /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
6019     bbs with greater number should go earlier.  */
6020  if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
6021    return -1;
6022  else
6023    return 1;
6024}
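
/* For illustration, bb_top_order_comparator is used as a qsort-style
   callback to lay out a loop body in topological order, e.g.

     basic_block *body
       = get_loop_body_in_custom_order (loop, bb_top_order_comparator);

   as done in make_region_from_loop below.  */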
6025
6026/* Create a region for LOOP and return its number.  If we don't want
6027   to pipeline LOOP, return -1.  */
6028static int
6029make_region_from_loop (struct loop *loop)
6030{
6031  unsigned int i;
6032  int new_rgn_number = -1;
6033  struct loop *inner;
6034
6035  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
6036  int bb_ord_index = 0;
6037  basic_block *loop_blocks;
6038  basic_block preheader_block;
6039
6040  if (loop->num_nodes
6041      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
6042    return -1;
6043
6044  /* Don't pipeline loops whose latch belongs to any of their inner loops.  */
6045  for (inner = loop->inner; inner; inner = inner->inner)
6046    if (flow_bb_inside_loop_p (inner, loop->latch))
6047      return -1;
6048
6049  loop->ninsns = num_loop_insns (loop);
6050  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
6051    return -1;
6052
6053  loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
6054
6055  for (i = 0; i < loop->num_nodes; i++)
6056    if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
6057      {
6058	free (loop_blocks);
6059	return -1;
6060      }
6061
6062  preheader_block = loop_preheader_edge (loop)->src;
6063  gcc_assert (preheader_block);
6064  gcc_assert (loop_blocks[0] == loop->header);
6065
6066  new_rgn_number = sel_create_new_region ();
6067
6068  sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
6069  bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
6070
6071  for (i = 0; i < loop->num_nodes; i++)
6072    {
6073      /* Add only those blocks that haven't been scheduled in the inner loop.
6074	 The exception is the basic blocks with bookkeeping code - they should
6075	 be added to the region (and they actually don't belong to the loop
6076	 body, but to the region containing that loop body).  */
6077
6078      gcc_assert (new_rgn_number >= 0);
6079
6080      if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
6081	{
6082	  sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
6083                                   new_rgn_number);
6084	  bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
6085	}
6086    }
6087
6088  free (loop_blocks);
6089  MARK_LOOP_FOR_PIPELINING (loop);
6090
6091  return new_rgn_number;
6092}
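
/* For illustration, for a simple innermost loop

     preheader -> header -> body -> latch -> header

   the region built above is { preheader, header, body, latch }, with the
   preheader always at position 0 in the region; sel_is_loop_preheader_p
   below relies on this property.  */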
6093
6094/* Create a new region from preheader blocks LOOP_BLOCKS.  */
6095void
6096make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
6097{
6098  unsigned int i;
6099  int new_rgn_number = -1;
6100  basic_block bb;
6101
6102  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
6103  int bb_ord_index = 0;
6104
6105  new_rgn_number = sel_create_new_region ();
6106
6107  FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
6108    {
6109      gcc_assert (new_rgn_number >= 0);
6110
6111      sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6112    }
6113
6114  vec_free (loop_blocks);
6115}
6116
6117
6118/* Create region(s) from loop nest LOOP, such that inner loops will be
6119   pipelined before outer loops.  Returns true when a region for LOOP
6120   is created.  */
6121static bool
6122make_regions_from_loop_nest (struct loop *loop)
6123{
6124  struct loop *cur_loop;
6125  int rgn_number;
6126
6127  /* If any immediate inner loop has not been made into a region, give up.  */
6128  for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6129    if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
6130      return false;
6131
6132  /* At this moment all regular inner loops should have been pipelined.
6133     Try to create a region from this loop.  */
6134  rgn_number = make_region_from_loop (loop);
6135
6136  if (rgn_number < 0)
6137    return false;
6138
6139  loop_nests.safe_push (loop);
6140  return true;
6141}
6142
6143/* Initialize the data structures needed for pipelining.  */
6144void
6145sel_init_pipelining (void)
6146{
6147  /* Collect loop information to be used in outer loops pipelining.  */
6148  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6149                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
6150		       | LOOPS_HAVE_RECORDED_EXITS
6151		       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6152  current_loop_nest = NULL;
6153
6154  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
6155  bitmap_clear (bbs_in_loop_rgns);
6156
6157  recompute_rev_top_order ();
6158}
6159
6160/* Returns a struct loop for region RGN.  */
6161loop_p
6162get_loop_nest_for_rgn (unsigned int rgn)
6163{
6164  /* Regions created with extend_rgns don't have corresponding loop nests,
6165     because they don't represent loops.  */
6166  if (rgn < loop_nests.length ())
6167    return loop_nests[rgn];
6168  else
6169    return NULL;
6170}
6171
6172/* Return true when LOOP was included in the pipelining regions.  */
6173bool
6174considered_for_pipelining_p (struct loop *loop)
6175{
6176  if (loop_depth (loop) == 0)
6177    return false;
6178
6179  /* Now, the loop could be too large or irreducible.  Check whether its
6180     region is in LOOP_NESTS.
6181     We determine the region number of LOOP as the region number of its
6182     latch.  We can't use the header here, because it could be a
6183     just-removed preheader, which would give us the wrong region number.
6184     (The latch cannot be in an inner loop: such loops are never pipelined.)  */
6185  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6186    {
6187      int rgn = CONTAINING_RGN (loop->latch->index);
6188
6189      gcc_assert ((unsigned) rgn < loop_nests.length ());
6190      return true;
6191    }
6192
6193  return false;
6194}
6195
6196/* Make regions from the remaining blocks, after loops have been chosen
6197   for pipelining.  */
6198static void
6199make_regions_from_the_rest (void)
6200{
6201  int cur_rgn_blocks;
6202  int *loop_hdr;
6203  int i;
6204
6205  basic_block bb;
6206  edge e;
6207  edge_iterator ei;
6208  int *degree;
6209
6210  /* Index in rgn_bb_table where to start allocating new regions.  */
6211  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6212
6213  /* Make regions from all the remaining basic blocks - those that don't
6214     belong to any loop or belong to irreducible loops.  Prepare the data
6215     structures for extend_rgns.  */
6216
6217  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6218     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6219     loop.  */
6220  loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
6221  degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6222
6223
6224  /* For each basic block that belongs to some loop, record the number
6225     of the innermost loop it belongs to.  */
6226  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
6227    loop_hdr[i] = -1;
6228
6229  FOR_EACH_BB_FN (bb, cfun)
6230    {
6231      if (bb->loop_father && bb->loop_father->num != 0
6232	  && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6233	loop_hdr[bb->index] = bb->loop_father->num;
6234    }
6235
6236  /* For each basic block, its degree is the number of incoming edges
6237     coming from basic blocks that are not yet in any loop region.
6238     Basic blocks already placed in loop regions get a degree of -1.  */
6239  FOR_EACH_BB_FN (bb, cfun)
6240    {
6241      degree[bb->index] = 0;
6242
6243      if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
6244	{
6245	  FOR_EACH_EDGE (e, ei, bb->preds)
6246	    if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
6247	      degree[bb->index]++;
6248	}
6249      else
6250	degree[bb->index] = -1;
6251    }
6252
6253  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6254
6255  /* Any block that did not end up in a region is placed into a region
6256     by itself.  */
6257  FOR_EACH_BB_FN (bb, cfun)
6258    if (degree[bb->index] >= 0)
6259      {
6260	rgn_bb_table[cur_rgn_blocks] = bb->index;
6261	RGN_NR_BLOCKS (nr_regions) = 1;
6262	RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6263        RGN_DONT_CALC_DEPS (nr_regions) = 0;
6264	RGN_HAS_REAL_EBB (nr_regions) = 0;
6265	CONTAINING_RGN (bb->index) = nr_regions++;
6266	BLOCK_TO_BB (bb->index) = 0;
6267      }
6268
6269  free (degree);
6270  free (loop_hdr);
6271}
6272
6273/* Free data structures used in pipelining of loops.  */
6274void
sel_finish_pipelining (void)
6275{
6276  struct loop *loop;
6277
6278  /* Release aux fields so we don't free them later by mistake.  */
6279  FOR_EACH_LOOP (loop, 0)
6280    loop->aux = NULL;
6281
6282  loop_optimizer_finalize ();
6283
6284  loop_nests.release ();
6285
6286  free (rev_top_order_index);
6287  rev_top_order_index = NULL;
6288}
6289
6290/* This function replaces find_rgns when
6291   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
6292void
6293sel_find_rgns (void)
6294{
6295  sel_init_pipelining ();
6296  extend_regions ();
6297
6298  if (current_loops)
6299    {
6300      loop_p loop;
6301
6302      FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
6303			    ? LI_FROM_INNERMOST
6304			    : LI_ONLY_INNERMOST))
6305	make_regions_from_loop_nest (loop);
6306    }
6307
6308  /* Make regions from all the rest basic blocks and schedule them.
6309     These blocks include blocks that don't belong to any loop or belong
6310     to irreducible loops.  */
6311  make_regions_from_the_rest ();
6312
6313  /* We don't need bbs_in_loop_rgns anymore.  */
6314  sbitmap_free (bbs_in_loop_rgns);
6315  bbs_in_loop_rgns = NULL;
6316}
6317
6318/* Add the preheader blocks of the previous loop to the current region,
6319   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
6320   them in *BBS.  Only used with -fsel-sched-pipelining-outer-loops.  */
6321void
6322sel_add_loop_preheaders (bb_vec_t *bbs)
6323{
6324  int i;
6325  basic_block bb;
6326  vec<basic_block> *preheader_blocks
6327    = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6328
6329  if (!preheader_blocks)
6330    return;
6331
6332  for (i = 0; preheader_blocks->iterate (i, &bb); i++)
6333    {
6334      bbs->safe_push (bb);
6335      last_added_blocks.safe_push (bb);
6336      sel_add_bb (bb);
6337    }
6338
6339  vec_free (preheader_blocks);
6340}
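
/* For illustration, sel_add_loop_preheaders above is the counterpart of
   sel_remove_loop_preheader below: when an inner loop region is finished,
   its preheader blocks are stashed in LOOP_PREHEADER_BLOCKS of the parent
   loop, and they are re-added by sel_add_loop_preheaders when the region
   of that parent loop is set up.  */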
6341
6342/* While pipelining outer loops, return TRUE if BB is a loop preheader.
6343   Note that the function should also work when pipelining_p is false,
6344   because it is used when deciding whether we should or should not
6345   reschedule pipelined code.  */
6346bool
6347sel_is_loop_preheader_p (basic_block bb)
6348{
6349  if (current_loop_nest)
6350    {
6351      struct loop *outer;
6352
6353      if (preheader_removed)
6354        return false;
6355
6356      /* Preheader is the first block in the region.  */
6357      if (BLOCK_TO_BB (bb->index) == 0)
6358        return true;
6359
6360      /* We used to find a preheader with the topological information.
6361         Check that the above code is equivalent to what we did before.  */
6362
6363      if (in_current_region_p (current_loop_nest->header))
6364	gcc_assert (!(BLOCK_TO_BB (bb->index)
6365		      < BLOCK_TO_BB (current_loop_nest->header->index)));
6366
6367      /* BB must not be the latch of an outer loop considered for
6368         pipelining; verify that this cannot happen here.  */
6369      for (outer = loop_outer (current_loop_nest);
6370	   outer;
6371	   outer = loop_outer (outer))
6372        if (considered_for_pipelining_p (outer) && outer->latch == bb)
6373          gcc_unreachable ();
6374    }
6375
6376  return false;
6377}
6378
6379/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6380   can be removed, making the corresponding edge fallthrough (assuming that
6381   all basic blocks between JUMP_BB and DEST_BB are empty).  */
6382static bool
6383bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6384{
6385  if (!onlyjump_p (BB_END (jump_bb))
6386      || tablejump_p (BB_END (jump_bb), NULL, NULL))
6387    return false;
6388
6389  /* Several outgoing edges, abnormal edge or destination of jump is
6390     not DEST_BB.  */
6391  if (EDGE_COUNT (jump_bb->succs) != 1
6392      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6393      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6394    return false;
6395
6396  /* None of the above applies, so the jump is removable.  */
6397  return true;
6398}
6399
6400/* Remove the loop preheader blocks from the current region and save them
6401   in PREHEADER_BLOCKS of the parent loop, so they can later be added to
6402   the region that represents the outer loop.  */
6403static void
6404sel_remove_loop_preheader (void)
6405{
6406  int i, old_len;
6407  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6408  basic_block bb;
6409  bool all_empty_p = true;
6410  vec<basic_block> *preheader_blocks
6411    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6412
6413  vec_check_alloc (preheader_blocks, 0);
6414
6415  gcc_assert (current_loop_nest);
6416  old_len = preheader_blocks->length ();
6417
6418  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
6419  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6420    {
6421      bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
6422
6423      /* If the basic block belongs to the region, but doesn't belong to
6424	 the corresponding loop, then it should be a preheader.  */
6425      if (sel_is_loop_preheader_p (bb))
6426        {
6427          preheader_blocks->safe_push (bb);
6428          if (BB_END (bb) != bb_note (bb))
6429            all_empty_p = false;
6430        }
6431    }
6432
6433  /* Remove these blocks only after iterating over the whole region.  */
6434  for (i = preheader_blocks->length () - 1; i >= old_len; i--)
6435    {
6436      bb = (*preheader_blocks)[i];
6437      sel_remove_bb (bb, false);
6438    }
6439
6440  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6441    {
6442      if (!all_empty_p)
6443        /* Immediately create new region from preheader.  */
6444        make_region_from_loop_preheader (preheader_blocks);
6445      else
6446        {
6447          /* If all preheader blocks are empty, don't create a new empty
6448             region.  Instead, remove them completely.  */
6449          FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
6450            {
6451              edge e;
6452              edge_iterator ei;
6453              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6454
6455              /* Redirect all incoming edges to next basic block.  */
6456              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6457                {
6458                  if (! (e->flags & EDGE_FALLTHRU))
6459                    redirect_edge_and_branch (e, bb->next_bb);
6460                  else
6461                    redirect_edge_succ (e, bb->next_bb);
6462                }
6463              gcc_assert (BB_NOTE_LIST (bb) == NULL);
6464              delete_and_free_basic_block (bb);
6465
6466              /* Check whether, after deleting the preheader, there is an
6467                 unconditional jump in PREV_BB that leads to the next basic
6468                 block NEXT_BB.  If so, delete this jump and clear the data
6469                 sets of its basic block if it becomes empty.  */
6470	      if (next_bb->prev_bb == prev_bb
6471		  && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
6472                  && bb_has_removable_jump_to_p (prev_bb, next_bb))
6473                {
6474                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6475                  if (BB_END (prev_bb) == bb_note (prev_bb))
6476                    free_data_sets (prev_bb);
6477                }
6478
6479              set_immediate_dominator (CDI_DOMINATORS, next_bb,
6480                                       recompute_dominator (CDI_DOMINATORS,
6481                                                            next_bb));
6482            }
6483        }
6484      vec_free (preheader_blocks);
6485    }
6486  else
6487    /* Store the preheader within the parent loop's structure.  */
6488    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6489			       preheader_blocks);
6490}
6491
6492#endif
6493