/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies ourselves; register dependencies, memory
   dependencies, dependencies to keep function calls in order, and
   the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.
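
   As a simplified sketch only (the actual driver is schedule_block,
   which lives outside this file), one iteration of the scheduling
   loop built from the helpers below looks roughly like:

	clock_var++;
	advance_one_cycle ();		      (advance the DFA state)
	queue_to_ready (&ready);	      (queue -> ready list)
	ready_sort (&ready);		      (order by priority)
	insn = ready_remove_first (&ready);
	advance = schedule_insn (insn, &ready, clock_var);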

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block motion over interblock motion, ties broken by
   4.  prefer useful motion over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it,
   ties broken by
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.
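
   For example, in the sequence

	(set (mem:SI (reg:SI 100)) (reg:SI 101))	;; store
	(set (reg:SI 102) (mem:SI (reg:SI 103)))	;; load

   the load must be assumed (true-)dependent on the store unless
   alias analysis can prove that the two addresses never overlap,
   whereas two loads from the same location may be reordered freely.
   (Illustrative RTL only.)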

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "params.h"

#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dSR).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}

struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)		(h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)		(h_i_d[INSN_UID (INSN)].tick)

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns found on the
   INSN_DEPEND lists of the unscheduled insns, i.e., of those that
   are ready, queued, or pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
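
/* For example (an illustrative walk-through only): suppose insn B
   depends on insn A with a latency of 3 cycles.  While A is
   unscheduled, B is "Pending".  When A is scheduled on cycle C
   (R->S), schedule_insn resolves B's dependence; with an effective
   cost of 3, B is placed in the "Queued" set (P->Q) via queue_insn,
   and queue_to_ready moves it to the "Ready" list (Q->R) on cycle
   C+3.  */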

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is larger
   than the maximal instruction execution time computed by genattr.c
   from the maximal time of the functional unit reservations needed
   to issue an insn and get its result.  This is the longest time an
   insn may be queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
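
/* Because max_insn_queue_index is one less than a power of two, the
   "& max_insn_queue_index" above is a cheap modulo.  As a worked
   example (numbers chosen only for illustration), with
   max_insn_queue_index == 7, i.e. a queue of length 8:

	NEXT_Q (7)	     == (7 + 1) & 7 == 0
	NEXT_Q_AFTER (6, 3)  == (6 + 3) & 7 == 1

   so indices wrap around the circular buffer.  */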

/* The following variable is the state reflecting all current and
   future reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
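
/* For illustration (hypothetical numbers, chosen only to show the
   index arithmetic): with VECLEN == 8, FIRST == 7 and N_READY == 3,
   the occupied slots are vec[5..7]:

	vec[7]	highest priority  == ready_element (ready, 0)
	vec[6]
	vec[5]	lowest priority	  == *ready_lastpos (ready)

   since ready_lastpos returns vec + first - n_ready + 1 and
   ready_element (ready, i) returns vec[first - i].  */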

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is computed from at most one register plus
   a constant.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)					\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
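
/* Under this definition, each of the following addresses is constant
   based (examples only):

	(reg:SI 100)
	(plus:SI (reg:SI 100) (const_int 4))
	(lo_sum:SI (reg:SI 100) (symbol_ref:SI "x"))

   while (plus:SI (reg:SI 100) (reg:SI 101)) is not, since neither
   operand is constant.  */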

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its pattern, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx, struct ready_list *, int);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (int);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static rtx reemit_notes (rtx, rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static rtx move_insn1 (rtx, rtx);
static rtx move_insn (rtx, rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static int max_issue (struct ready_list *, int *);

static rtx choose_ready (struct ready_list *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (FILE *dump_file ATTRIBUTE_UNUSED)
{
}
#else

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (link == 0 || used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      if (INSN_CODE (insn) >= 0)
	{
	  if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
	    cost = 0;
	  else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, link, insn, cost);

      if (cost < 0)
	cost = 0;
    }

  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
	    {
	      rtx next;
	      int next_priority;

	      next = XEXP (link, 0);

	      /* Critical path is meaningful in block boundaries only.  */
	      if (! (*current_sched_info->contributes_to_priority) (next, insn))
		continue;

	      next_priority = insn_cost (insn, link, next) + priority (next);
	      if (next_priority > this_priority)
		this_priority = next_priority;
	    }
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* The insns in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer an insn with smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last scheduled insn.  */
  if (last_scheduled_insn)
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}

/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }
}

/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the lowest
   priority.  */

HAIFA_INLINE void
ready_add (struct ready_list *ready, rtx insn)
{
  if (ready->first == ready->n_ready)
    {
      memmove (ready->vec + ready->veclen - ready->n_ready,
	       ready_lastpos (ready),
	       ready->n_ready * sizeof (rtx));
      ready->first = ready->veclen - 1;
    }
  ready->vec[ready->first - ready->n_ready] = insn;
  ready->n_ready++;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;
  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return the element at INDEX from the ready list.  The insn with the
   highest priority has INDEX 0, and the insn with the lowest priority
   has INDEX N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove and return the element at INDEX from the ready list.  The
   insn with the highest priority has INDEX 0, and the insn with the
   lowest priority has INDEX N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  return t;
}


/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}

/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns the necessary cycle advance after issuing the insn (it is
   not zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn, struct ready_list *ready, int clock)
{
  rtx link;
  int advance = 0;
  int premature_issue = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  if (INSN_TICK (insn) > clock)
    {
      /* 'insn' has been prematurely moved from the queue to the
	 ready list.  */
      premature_issue = INSN_TICK (insn) - clock;
    }

  for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);
      int cost = insn_cost (insn, link, next);

      INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost + premature_issue);

      if ((INSN_DEP_COUNT (next) -= 1) == 0)
	{
	  int effective_cost = INSN_TICK (next) - clock;

	  if (! (*current_sched_info->new_ready) (next))
	    continue;

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tdependences resolved: insn %s ",
		       (*current_sched_info->print_insn) (next, 0));

	      if (effective_cost < 1)
		fprintf (sched_dump, "into ready\n");
	      else
		fprintf (sched_dump, "into queue with cost=%d\n",
			 effective_cost);
	    }

	  /* Adjust the priority of NEXT and either put it on the ready
	     list or queue it.  */
	  adjust_priority (next);
	  if (effective_cost < 1)
	    ready_add (ready, next);
	  else
	    {
	      queue_insn (next, effective_cost);

	      if (SCHED_GROUP_P (next) && advance < effective_cost)
		advance = effective_cost;
	    }
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock;
    }
  return advance;
}

/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}

/* Return the head and tail pointers of BB.  */

void
get_block_head_tail (int b, rtx *headp, rtx *tailp)
{
  /* HEAD and TAIL delimit the basic block being scheduled.  */
  rtx head = BB_HEAD (BASIC_BLOCK (b));
  rtx tail = BB_END (BASIC_BLOCK (b));

  /* Don't include any notes or labels at the beginning of the
     basic block, or notes at the ends of basic blocks.  */
  while (head != tail)
    {
      if (NOTE_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  *headp = head;
  *tailp = tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}

/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}

/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
  /* This used to emit line number notes before every non-deleted note.
     However, this confuses a debugger, because line notes not separated
     by real instructions all end up at the same address.  I can find no
     use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}

/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
)
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}

/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}

/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (int b)
{
  rtx insn, next_tail, head, tail;

  get_block_head_tail (b, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      int reg_weight = 0;
      rtx x;

      /* Handle register life information.  */
      if (! INSN_P (insn))
	continue;

      /* Increment weight for each register born here.  */
      x = PATTERN (insn);
      reg_weight += find_set_reg_weight (x);
      if (GET_CODE (x) == PARALLEL)
	{
	  int j;
	  for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	    {
	      x = XVECEXP (PATTERN (insn), 0, j);
	      reg_weight += find_set_reg_weight (x);
	    }
	}
      /* Decrement weight for each register that dies here.  */
      for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
	{
	  if (REG_NOTE_KIND (x) == REG_DEAD
	      || REG_NOTE_KIND (x) == REG_UNUSED)
	    reg_weight--;
	}

      INSN_REG_WEIGHT (insn) = reg_weight;
    }
}

/* Scheduling clock, modified in schedule_block () and queue_to_ready ().  */
static int clock_var;

/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn))
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  insn_queue[q_ptr] = 0;

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
		}
	      insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}

/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn);
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}


/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* Use a non-negative cost to mark the insn as not ready,
		   to avoid an infinite Q->R->Q->R... cycle.  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* Move from Q to R.  */
		      q_size -= 1;
		      ready_add (ready, insn);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove at most flag_sched_stalled_insns insns
			   from Q per invocation.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */

    } /* for stalls.. */

  return insns_removed;
}


/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}

/* move_insn1: Remove INSN from insn chain, and link it after LAST insn.  */

static rtx
move_insn1 (rtx insn, rtx last)
{
  NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
  PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);

  NEXT_INSN (insn) = NEXT_INSN (last);
  PREV_INSN (NEXT_INSN (last)) = insn;

  NEXT_INSN (last) = insn;
  PREV_INSN (insn) = last;

  return insn;
}

/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE following the first one contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  LAST is the last instruction
   output by the instruction scheduler.  Return the new value of LAST.  */

static rtx
reemit_notes (rtx insn, rtx last)
{
  rtx note, retval;

  retval = last;
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
  return retval;
}

/* Move INSN.  Reemit notes if needed.

   Return the last insn emitted by the scheduler, which is the
   return value of reemit_notes.  */

static rtx
move_insn (rtx insn, rtx last)
{
  rtx retval;

  move_insn1 (insn, last);

  /* Record the return value of the (single) call to reemit_notes.  */
  retval = reemit_notes (insn, insn);

  SCHED_GROUP_P (insn) = 0;

  return retval;
}

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of remaining insn issues we should still try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable value is the number of essential insns issued
   on the current cycle.  An insn is essential if it changes the
   processor's state.  */
static int cycle_issued_insns;
1701
1702/* The following variable value is maximal number of tries of issuing
1703   insns for the first cycle multipass insn scheduling.  We define
1704   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
1705   need this constraint if all real insns (with non-negative codes)
1706   had reservations because in this case the algorithm complexity is
1707   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
1708   might be incomplete and such insn might occur.  For such
1709   descriptions, the complexity of algorithm (without the constraint)
1710   could achieve DFA_LOOKAHEAD ** N , where N is the queue length.  */
1711static int max_lookahead_tries;
1712
1713/* The following value is value of hook
1714   `first_cycle_multipass_dfa_lookahead' at the last call of
1715   `max_issue'.  */
1716static int cached_first_cycle_multipass_dfa_lookahead = 0;
1717
/* The following variable holds the value of `issue_rate' at the last
   call of `sched_init'.  */
static int cached_issue_rate = 0;

/* The following function returns the maximal (or close to maximal)
   number of insns which can be issued on the same cycle, one of
   which is the insn with the best rank (the first insn in READY).
   To find this number, the function tries different samples of ready
   insns.  READY is the current ready list.  The global array
   READY_TRY reflects which insns have already been issued in this
   try.  INDEX will contain the index of the best insn in READY.  The
   function is used only for first cycle multipass scheduling.  */
static int
max_issue (struct ready_list *ready, int *index)
{
  int n, i, all, n_ready, best, delay, tries_num;
  struct choice_entry *top;
  rtx insn;

  best = 0;
  memcpy (choice_stack->state, curr_state, dfa_state_size);
  top = choice_stack;
  top->rest = cached_first_cycle_multipass_dfa_lookahead;
  top->n = 0;
  n_ready = ready->n_ready;
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try [i])
      all++;
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (top->rest == 0 || i >= n_ready)
	{
	  if (top == choice_stack)
	    break;
	  if (best < top - choice_stack && ready_try [0])
	    {
	      best = top - choice_stack;
	      *index = choice_stack [1].index;
	      if (top->n == issue_rate - cycle_issued_insns || best == all)
		break;
	    }
	  i = top->index;
	  ready_try [i] = 0;
	  top--;
	  memcpy (curr_state, top->state, dfa_state_size);
	}
      else if (!ready_try [i])
	{
	  tries_num++;
	  if (tries_num > max_lookahead_tries)
	    break;
	  insn = ready_element (ready, i);
	  delay = state_transition (curr_state, insn);
	  if (delay < 0)
	    {
	      if (state_dead_lock_p (curr_state))
		top->rest = 0;
	      else
		top->rest--;
	      n = top->n;
	      if (memcmp (top->state, curr_state, dfa_state_size) != 0)
		n++;
	      top++;
	      top->rest = cached_first_cycle_multipass_dfa_lookahead;
	      top->index = i;
	      top->n = n;
	      memcpy (top->state, curr_state, dfa_state_size);
	      ready_try [i] = 1;
	      i = -1;
	    }
	}
      i++;
    }
  while (top != choice_stack)
    {
      ready_try [top->index] = 0;
      top--;
    }
  memcpy (curr_state, choice_stack->state, dfa_state_size);
  return best;
}

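/* A schematic example of the search in max_issue (hypothetical
   insns): with ready list {A, B, C} it explores issue sequences
   depth-first, e.g. A; A,B; A,C; B; B,A; ... pushing one
   choice_stack entry per issued insn and snapshotting the DFA state
   there, so backtracking is a memcpy rather than a re-simulation.
   Only sequences that issue the best-rank insn (ready_try [0] set)
   may update BEST, and *INDEX records the first insn of the deepest
   such sequence.  */
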
/* The following function chooses an insn from READY and removes it
   from the list.  The function is used only for first cycle
   multipass scheduling.  */

static rtx
choose_ready (struct ready_list *ready)
{
  int lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
    return ready_remove_first (ready);
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i;
      rtx insn;

      if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
	{
	  cached_first_cycle_multipass_dfa_lookahead = lookahead;
	  max_lookahead_tries = 100;
	  for (i = 0; i < issue_rate; i++)
	    max_lookahead_tries *= lookahead;
	}
      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
	return ready_remove_first (ready);
      for (i = 1; i < ready->n_ready; i++)
	{
	  insn = ready_element (ready, i);
	  ready_try [i]
	    = (INSN_CODE (insn) < 0
	       || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn)));
	}
      if (max_issue (ready, &index) == 0)
	return ready_remove_first (ready);
      else
	return ready_remove (ready, index);
    }
}

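/* A hypothetical sketch (not from any real back end) of how a target
   enables the multipass path above: it supplies the lookahead hook
   and installs it via TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
   in its target definition.  */
#if 0
static int
mytarget_multipass_dfa_lookahead (void)
{
  /* Consider up to 4 ready insns when filling each cycle.  */
  return 4;
}
#endif
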
/* Use forward list scheduling to rearrange insns of block B in region RGN,
   possibly bringing insns from subsequent blocks in the same region.  */

void
schedule_block (int b, int rgn_n_insns)
{
  struct ready_list ready;
  int i, first_cycle_insn_p;
  int can_issue_more;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump,
	       ";;   ======================================================\n");
      fprintf (sched_dump,
	       ";;   -- basic block %d from %d to %d -- %s reload\n",
	       b, INSN_UID (head), INSN_UID (tail),
	       (reload_completed ? "after" : "before"));
      fprintf (sched_dump,
	       ";;   ======================================================\n");
      fprintf (sched_dump, "\n");
    }

  state_reset (curr_state);

  /* Allocate the ready list.  */
  ready.veclen = rgn_n_insns + 1 + issue_rate;
  ready.first = ready.veclen - 1;
  ready.vec = xmalloc (ready.veclen * sizeof (rtx));
  ready.n_ready = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);
  ready_try = xcalloc ((rgn_n_insns + 1), sizeof (char));
  choice_stack = xmalloc ((rgn_n_insns + 1)
			  * sizeof (struct choice_entry));
  for (i = 0; i <= rgn_n_insns; i++)
    choice_stack[i].state = xmalloc (dfa_state_size);

  (*current_sched_info->init_ready_list) (&ready);

  if (targetm.sched.md_init)
    targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
  last_clock_var = -1;

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
    {
      ready_sort (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.  */
      for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
	if (!SCHED_GROUP_P (ready_element (&ready, i)))
	  break;

      if (sched_verbose >= 2)
	{
	  fprintf (sched_dump,
		   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
	  fprintf (sched_dump,
		   ";;\t\t before reload => truncated to %d insns\n", i);
	}

      /* Delay all insns past it for 1 cycle.  */
      while (i < ready.n_ready)
	queue_insn (ready_remove (&ready, i), 1);
    }

  /* Start just before the beginning of time.  */
  clock_var = -1;
  advance = 0;

  sort_p = TRUE;
  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      do
	{
	  start_clock_var = clock_var;

	  clock_var++;

	  advance_one_cycle ();

	  /* Add to the ready list all pending insns that can be issued now.
	     If there are no ready insns, increment clock until one
	     is ready and add all pending insns at that point to the ready
	     list.  */
	  queue_to_ready (&ready);

	  gcc_assert (ready.n_ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:  ");
	      debug_ready_list (&ready);
	    }
	  advance -= clock_var - start_clock_var;
	}
      while (advance > 0);

      if (sort_p)
	{
	  /* Sort the ready list based on priority.  */
	  ready_sort (&ready);

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\t\tReady list after ready_sort:  ");
	      debug_ready_list (&ready);
	    }
	}

      /* Allow the target to reorder the list, typically for
	 better instruction bundling.  */
      if (sort_p && targetm.sched.reorder
	  && (ready.n_ready == 0
	      || !SCHED_GROUP_P (ready_element (&ready, 0))))
	can_issue_more =
	  targetm.sched.reorder (sched_dump, sched_verbose,
				 ready_lastpos (&ready),
				 &ready.n_ready, clock_var);
      else
	can_issue_more = issue_rate;

      first_cycle_insn_p = 1;
      cycle_issued_insns = 0;
      for (;;)
	{
	  rtx insn;
	  int cost;
	  bool asm_p = false;

	  if (sched_verbose >= 2)
	    {
	      fprintf (sched_dump, ";;\tReady list (t =%3d):  ",
		       clock_var);
	      debug_ready_list (&ready);
	    }

	  if (ready.n_ready == 0
	      && can_issue_more
	      && reload_completed)
	    {
	      /* Allow scheduling insns directly from the queue in case
		 there's nothing better to do (ready list is empty) but
		 there are still vacant dispatch slots in the current cycle.  */
	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
	      memcpy (temp_state, curr_state, dfa_state_size);
	      if (early_queue_to_ready (temp_state, &ready))
		ready_sort (&ready);
	    }

	  if (ready.n_ready == 0 || !can_issue_more
	      || state_dead_lock_p (curr_state)
	      || !(*current_sched_info->schedule_more_p) ())
	    break;

	  /* Select and remove the insn from the ready list.  */
	  if (sort_p)
	    insn = choose_ready (&ready);
	  else
	    insn = ready_remove_first (&ready);

	  if (targetm.sched.dfa_new_cycle
	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
					      insn, last_clock_var,
					      clock_var, &sort_p))
	    {
	      ready_add (&ready, insn);
	      break;
	    }

	  sort_p = TRUE;
	  memcpy (temp_state, curr_state, dfa_state_size);
	  if (recog_memoized (insn) < 0)
	    {
	      asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
		       || asm_noperands (PATTERN (insn)) >= 0);
	      if (!first_cycle_insn_p && asm_p)
		/* This is an asm insn that we tried to issue on a
		   cycle other than the first.  Issue it on the next
		   cycle.  */
		cost = 1;
	      else
		/* A USE insn, or something else we don't need to
		   understand.  We can't pass these directly to
		   state_transition because it will trigger a
		   fatal error for unrecognizable insns.  */
		cost = 0;
	    }
	  else
	    {
	      cost = state_transition (temp_state, insn);
	      if (cost < 0)
		cost = 0;
	      else if (cost == 0)
		cost = 1;
	    }

	  if (cost >= 1)
	    {
	      queue_insn (insn, cost);
	      if (SCHED_GROUP_P (insn))
		{
		  advance = cost;
		  break;
		}

	      continue;
	    }

	  if (! (*current_sched_info->can_schedule_ready_p) (insn))
	    goto next;

	  last_scheduled_insn = move_insn (insn, last_scheduled_insn);

	  if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
	    cycle_issued_insns++;
	  memcpy (curr_state, temp_state, dfa_state_size);

	  if (targetm.sched.variable_issue)
	    can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					       insn, can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so do
	     not count them against the issue rate.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    can_issue_more--;

	  advance = schedule_insn (insn, &ready, clock_var);

	  /* After issuing an asm insn we should start a new cycle.  */
	  if (advance == 0 && asm_p)
	    advance = 1;
	  if (advance != 0)
	    break;

	next:
	  first_cycle_insn_p = 0;

	  /* Sort the ready list based on priority.  This must be
	     redone here, as schedule_insn may have readied additional
	     insns that will not be sorted correctly.  */
	  if (ready.n_ready > 0)
	    ready_sort (&ready);

	  if (targetm.sched.reorder2
	      && (ready.n_ready == 0
		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
	    {
	      can_issue_more =
		targetm.sched.reorder2 (sched_dump, sched_verbose,
					ready.n_ready
					? ready_lastpos (&ready) : NULL,
					&ready.n_ready, clock_var);
	    }
	}
    }

  if (targetm.sched.md_finish)
    targetm.sched.md_finish (sched_dump, sched_verbose);

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);
    }

  /* Sanity check -- queue must be empty now.  Meaningless if region has
     multiple bbs.  */
  gcc_assert (!current_sched_info->queue_must_finish_empty || !q_size);

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  if (!reload_completed)
    {
      rtx insn, link, next;

      /* INSN_TICK (the minimum clock tick at which the insn becomes
         ready) may not be correct for insns in the subsequent blocks
         of the region.  We should use a correct value of `clock_var'
         or modify INSN_TICK.  It is better to keep the clock_var
         value equal to 0 at the start of a basic block.  Therefore we
         modify INSN_TICK here.  */
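      /* For instance (illustrative numbers): if this block finished
	 at clock_var == 5, a dependent insn whose INSN_TICK was 7 is
	 rebased to 7 - 5 == 2 by the loop below, i.e. two cycles into
	 the next block's clock, which restarts at 0.  */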
      for (insn = head; insn != tail; insn = NEXT_INSN (insn))
	if (INSN_P (insn))
	  {
	    for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
	      {
		next = XEXP (link, 0);
		INSN_TICK (next) -= clock_var;
	      }
	  }
    }

  /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
     previously found among the insns.  Insert them at the beginning
     of the insns.  */
  if (note_list != 0)
    {
      rtx note_head = note_list;

      while (PREV_INSN (note_head))
	{
	  note_head = PREV_INSN (note_head);
	}

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;
      head = note_head;
    }
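
  /* E.g. (illustrative): with a note chain N1 <-> N2, NOTE_LIST
     pointing at N2, and block head H, the splice above yields
     ... <-> N1 <-> N2 <-> H <-> ... and HEAD becomes N1.  */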

  /* Debugging.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;   total time = %d\n;;   new head = %d\n",
	       clock_var, INSN_UID (head));
      fprintf (sched_dump, ";;   new tail = %d\n\n",
	       INSN_UID (tail));
    }

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free (ready.vec);

  free (ready_try);
  for (i = 0; i <= rgn_n_insns; i++)
    free (choice_stack [i].state);
  free (choice_stack);
}

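/* An illustrative trace of the main scheduling loop above
   (hypothetical two-issue machine): on cycle 0, insns A and B issue
   and can_issue_more drops to 0; insn C has cost 2 against the
   resulting DFA state, so queue_insn places it two slots ahead in
   insn_queue; queue_to_ready returns it to the ready list once
   clock_var reaches 2, where it issues normally.  */
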
/* Set_priorities: compute the priority of each insn in the block,
   and return the number of insns processed.  */

int
set_priorities (rtx head, rtx tail)
{
  rtx insn;
  int n_insn;
  int sched_max_insns_priority;
  rtx prev_head;

  prev_head = PREV_INSN (head);

  if (head == tail && (! INSN_P (head)))
    return 0;

  n_insn = 0;
  sched_max_insns_priority = 0;
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (NOTE_P (insn))
	continue;

      n_insn++;
      (void) priority (insn);

      if (INSN_PRIORITY_KNOWN (insn))
	sched_max_insns_priority =
	  MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
    }
  sched_max_insns_priority += 1;
  current_sched_info->sched_max_insns_priority =
	sched_max_insns_priority;

  return n_insn;
}

/* Initialize some global state for the scheduler.  DUMP_FILE is to be used
   for debugging output.  */

void
sched_init (FILE *dump_file)
{
  int luid;
  basic_block b;
  rtx insn;
  int i;

  /* Disable speculative loads in the presence of cc0.  */
#ifdef HAVE_cc0
  flag_schedule_speculative_load = 0;
#endif

  /* Set dump and sched_verbose for the desired debugging output.  If no
     dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
     For -fsched-verbose=N, N>=10, print everything to stderr.  */
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
		? stderr : dump_file);

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (cached_issue_rate != issue_rate)
    {
      cached_issue_rate = issue_rate;
      /* To invalidate max_lookahead_tries:  */
      cached_first_cycle_multipass_dfa_lookahead = 0;
    }

  /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
     pseudos which do not cross calls.  */
  old_max_uid = get_max_uid () + 1;

  h_i_d = xcalloc (old_max_uid, sizeof (*h_i_d));

  for (i = 0; i < old_max_uid; i++)
    h_i_d [i].cost = -1;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_start ();
  dfa_state_size = state_size ();
  curr_state = xmalloc (dfa_state_size);

  h_i_d[0].luid = 0;
  luid = 1;
  FOR_EACH_BB (b)
    for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
      {
	INSN_LUID (insn) = luid;

	/* Increment the next luid, unless this is a note.  We don't
	   really need separate IDs for notes and we don't want to
	   schedule differently depending on whether or not there are
	   line-number notes, i.e., depending on whether or not we're
	   generating debugging information.  */
	if (!NOTE_P (insn))
	  ++luid;

	if (insn == BB_END (b))
	  break;
      }

  init_dependency_caches (luid);

  init_alias_analysis ();

  if (write_symbols != NO_DEBUG)
    {
      rtx line;

      line_note_head = xcalloc (last_basic_block, sizeof (rtx));

      /* Save-line-note-head:
         Determine the line-number at the start of each basic block.
         This must be computed and saved now, because after a basic block's
         predecessor has been scheduled, it is impossible to accurately
         determine the correct line number for the first insn of the block.  */

      FOR_EACH_BB (b)
	{
	  for (line = BB_HEAD (b); line; line = PREV_INSN (line))
	    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
	      {
		line_note_head[b->index] = line;
		break;
	      }
	  /* Do a forward search as well, since we won't get to see the first
	     notes in a basic block.  */
	  for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
	    {
	      if (INSN_P (line))
		break;
	      if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
		line_note_head[b->index] = line;
	    }
	}
    }

  /* ??? Add a NOTE after the last insn of the last basic block.  It is not
     known why this is done.  */

  insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
  if (NEXT_INSN (insn) == 0
      || (!NOTE_P (insn)
	  && !LABEL_P (insn)
	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
	  && !BARRIER_P (NEXT_INSN (insn))))
    {
      emit_note_after (NOTE_INSN_DELETED, BB_END (EXIT_BLOCK_PTR->prev_bb));
      /* Make the note appear outside the basic block.  */
      BB_END (EXIT_BLOCK_PTR->prev_bb)
	= PREV_INSN (BB_END (EXIT_BLOCK_PTR->prev_bb));
    }

  /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
     removing death notes.  */
  FOR_EACH_BB_REVERSE (b)
    find_insn_reg_weight (b->index);

  if (targetm.sched.md_init_global)
    targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
}

/* Free global data used during insn scheduling.  */

void
sched_finish (void)
{
  free (h_i_d);
  free (curr_state);
  dfa_finish ();
  free_dependency_caches ();
  end_alias_analysis ();
  if (write_symbols != NO_DEBUG)
    free (line_note_head);

  if (targetm.sched.md_finish_global)
    targetm.sched.md_finish_global (sched_dump, sched_verbose);
}
#endif /* INSN_SCHEDULING */
