1/* Instruction scheduling pass.
2   Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
3   Contributed by Michael Tiemann (tiemann@cygnus.com)
4   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6This file is part of GNU CC.
7
8GNU CC is free software; you can redistribute it and/or modify
9it under the terms of the GNU General Public License as published by
10the Free Software Foundation; either version 2, or (at your option)
11any later version.
12
13GNU CC is distributed in the hope that it will be useful,
14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
19along with GNU CC; see the file COPYING.  If not, write to
20the Free Software Foundation, 59 Temple Place - Suite 330,
21Boston, MA 02111-1307, USA.  */
22
23/* Instruction scheduling pass.
24
25   This pass implements list scheduling within basic blocks.  It is
26   run after flow analysis, but before register allocation.  The
27   scheduler works as follows:
28
29   We compute insn priorities based on data dependencies.  Flow
30   analysis only creates a fraction of the data-dependencies we must
31   observe: namely, only those dependencies which the combiner can be
32   expected to use.  For this pass, we must therefore create the
33   remaining dependencies we need to observe: register dependencies,
34   memory dependencies, dependencies to keep function calls in order,
35   and the dependence between a conditional branch and the setting of
36   condition codes are all dealt with here.
37
38   The scheduler first traverses the data flow graph, starting with
39   the last instruction, and proceeding to the first, assigning
40   values to insn_priority as it goes.  This sorts the instructions
41   topologically by data dependence.
42
43   Once priorities have been established, we order the insns using
44   list scheduling.  This works as follows: starting with a list of
45   all the ready insns, and sorted according to priority number, we
46   schedule the insn from the end of the list by placing its
47   predecessors in the list according to their priority order.  We
48   consider this insn scheduled by setting the pointer to the "end" of
49   the list to point to the previous insn.  When an insn has no
50   predecessors, we either queue it until sufficient time has elapsed
51   or add it to the ready list.  As the instructions are scheduled or
52   when stalls are introduced, the queue advances and dumps insns into
53   the ready list.  When all insns down to the lowest priority have
54   been scheduled, the critical path of the basic block has been made
55   as short as possible.  The remaining insns are then scheduled in
56   remaining slots.
57
58   Function unit conflicts are resolved during reverse list scheduling
59   by tracking the time when each insn is committed to the schedule
60   and from that, the time the function units it uses must be free.
61   As insns on the ready list are considered for scheduling, those
62   that would result in a blockage of the already committed insns are
63   queued until no blockage will result.  Among the remaining insns on
64   the ready list to be considered, the first one with the largest
65   potential for causing a subsequent blockage is chosen.
66
67   The following list shows the order in which we want to break ties
68   among insns in the ready list:
69
70	1.  choose insn with lowest conflict cost, ties broken by
71	2.  choose insn with the longest path to end of bb, ties broken by
72	3.  choose insn that kills the most registers, ties broken by
73	4.  choose insn that conflicts with the most ready insns, or finally
74	5.  choose insn with lowest UID.
75
76   Memory references complicate matters.  Only if we can be certain
77   that memory references are not part of the data dependency graph
78   (via true, anti, or output dependence), can we move operations past
79   memory references.  To first approximation, reads can be done
80   independently, while writes introduce dependencies.  Better
81   approximations will yield fewer dependencies.
82
83   Dependencies set up by memory references are treated in exactly the
84   same way as other dependencies, by using LOG_LINKS.
85
86   Having optimized the critical path, we may have also unduly
87   extended the lifetimes of some registers.  If an operation requires
88   that constants be loaded into registers, it is certainly desirable
89   to load those constants as early as necessary, but no earlier.
90   I.e., it will not do to load up a bunch of registers at the
91   beginning of a basic block only to use them at the end, if they
92   could be loaded later, since this may result in excessive register
93   utilization.
94
95   Note that since branches are never in basic blocks, but only end
96   basic blocks, this pass will not do any branch scheduling.  But
97   that is ok, since we can use GNU's delayed branch scheduling
98   pass to take care of this case.
99
100   Also note that no further optimizations based on algebraic identities
101   are performed, so this pass would be a good one to perform instruction
102   splitting, such as breaking up a multiply instruction into shifts
103   and adds where that is profitable.
104
105   Given the memory aliasing analysis that this pass should perform,
106   it should be possible to remove redundant stores to memory, and to
107   load values from registers instead of hitting memory.
108
109   This pass must update information that subsequent passes expect to be
110   correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
111   reg_n_calls_crossed, and reg_live_length.  Also, BLOCK_HEAD,
112   BLOCK_END.
113
114   The information in the line number notes is carefully retained by
115   this pass.  Notes that refer to the starting and ending of
116   exception regions are also carefully retained by this pass.  All
117   other NOTE insns are grouped in their same relative order at the
118   beginning of basic blocks that have been scheduled.  */
119
120#include "config.h"
121#include "system.h"
122#include "toplev.h"
123#include "rtl.h"
124#include "basic-block.h"
125#include "regs.h"
126#include "hard-reg-set.h"
127#include "flags.h"
128#include "insn-config.h"
129#include "insn-attr.h"
130#include "recog.h"
131
132#ifndef INSN_SCHEDULING
133void
134schedule_insns (dump_file)
135     FILE *dump_file ATTRIBUTE_UNUSED;
136{
137}
138#else /* INSN_SCHEDULING -- rest of file */
139
140extern char *reg_known_equiv_p;
141extern rtx *reg_known_value;
142
143/* Arrays set up by scheduling for the same respective purposes as
144   similar-named arrays set up by flow analysis.  We work with these
145   arrays during the scheduling pass so we can compare values against
146   unscheduled code.
147
148   Values of these arrays are copied at the end of this pass into the
149   arrays set up by flow analysis.  */
150static int *sched_reg_n_calls_crossed;
151static int *sched_reg_live_length;
152
153/* Element N is the next insn that sets (hard or pseudo) register
154   N within the current basic block; or zero, if there is no
155   such insn.  Needed for new registers which may be introduced
156   by splitting insns.  */
157static rtx *reg_last_uses;
158static rtx *reg_last_sets;
159static regset reg_pending_sets;
160static int reg_pending_sets_all;
161
162/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
163static int *insn_luid;
164#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
165
166/* Vector indexed by INSN_UID giving each instruction a priority.  */
167static int *insn_priority;
168#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
169
170static short *insn_costs;
171#define INSN_COST(INSN)	insn_costs[INSN_UID (INSN)]
172
173/* Vector indexed by INSN_UID giving an encoding of the function units
174   used.  */
175static short *insn_units;
176#define INSN_UNIT(INSN)	insn_units[INSN_UID (INSN)]
177
178/* Vector indexed by INSN_UID giving an encoding of the blockage range
179   function.  The unit and the range are encoded.  */
180static unsigned int *insn_blockage;
181#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
182#define UNIT_BITS 5
183#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
184#define ENCODE_BLOCKAGE(U,R)				\
185  ((((U) << UNIT_BITS) << BLOCKAGE_BITS			\
186    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS		\
187   | MAX_BLOCKAGE_COST (R))
188#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
189#define BLOCKAGE_RANGE(B) \
190  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
191   | ((B) & BLOCKAGE_MASK))
192
193/* Encodings of the `<name>_unit_blockage_range' function.  */
194#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
195#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
196
197#define DONE_PRIORITY	-1
198#define MAX_PRIORITY	0x7fffffff
199#define TAIL_PRIORITY	0x7ffffffe
200#define LAUNCH_PRIORITY	0x7f000001
201#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
202#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
203
204/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
205static int *insn_ref_count;
206#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
207
208/* Vector indexed by INSN_UID giving line-number note in effect for each
209   insn.  For line-number notes, this indicates whether the note may be
210   reused.  */
211static rtx *line_note;
212#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
213
214/* Vector indexed by basic block number giving the starting line-number
215   for each basic block.  */
216static rtx *line_note_head;
217
218/* List of important notes we must keep around.  This is a pointer to the
219   last element in the list.  */
220static rtx note_list;
221
222/* Regsets telling whether a given register is live or dead before the last
223   scheduled insn.  Must scan the instructions once before scheduling to
224   determine what registers are live or dead at the end of the block.  */
225static regset bb_dead_regs;
226static regset bb_live_regs;
227
228/* Regset telling whether a given register is live after the insn currently
229   being scheduled.  Before processing an insn, this is equal to bb_live_regs
230   above.  This is used so that we can find registers that are newly born/dead
231   after processing an insn.  */
232static regset old_live_regs;
233
234/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
235   during the initial scan and reused later.  If there are not exactly as
236   many REG_DEAD notes in the post scheduled code as there were in the
237   prescheduled code then we trigger an abort because this indicates a bug.  */
238static rtx dead_notes;
239
240/* Queues, etc.  */
241
242/* An instruction is ready to be scheduled when all insns following it
243   have already been scheduled.  It is important to ensure that all
244   insns which use its result will not be executed until its result
245   has been computed.  An insn is maintained in one of four structures:
246
247   (P) the "Pending" set of insns which cannot be scheduled until
248   their dependencies have been satisfied.
249   (Q) the "Queued" set of insns that can be scheduled when sufficient
250   time has passed.
251   (R) the "Ready" list of unscheduled, uncommitted insns.
252   (S) the "Scheduled" list of insns.
253
254   Initially, all insns are either "Pending" or "Ready" depending on
255   whether their dependencies are satisfied.
256
257   Insns move from the "Ready" list to the "Scheduled" list as they
258   are committed to the schedule.  As this occurs, the insns in the
259   "Pending" list have their dependencies satisfied and move to either
260   the "Ready" list or the "Queued" set depending on whether
261   sufficient time has passed to make them ready.  As time passes,
262   insns move from the "Queued" set to the "Ready" list.  Insns may
263   move from the "Ready" list to the "Queued" set if they are blocked
264   due to a function unit conflict.
265
266   The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
267   insns, i.e., those that are ready, queued, and pending.
268   The "Queued" set (Q) is implemented by the variable `insn_queue'.
269   The "Ready" list (R) is implemented by the variables `ready' and
270   `n_ready'.
271   The "Scheduled" list (S) is the new insn chain built by this pass.
272
273   The transition (R->S) is implemented in the scheduling loop in
274   `schedule_block' when the best insn to schedule is chosen.
275   The transition (R->Q) is implemented in `schedule_select' when an
276   insn is found to have a function unit conflict with the already
277   committed insns.
278   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
279   insns move from the ready list to the scheduled list.
280   The transition (Q->R) is implemented at the top of the scheduling
281   loop in `schedule_block' as time passes or stalls are introduced.  */
282
283/* Implement a circular buffer to delay instructions until sufficient
284   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
285   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
286   longest time an isnsn may be queued.  */
287static rtx insn_queue[INSN_QUEUE_SIZE];
288static int q_ptr = 0;
289static int q_size = 0;
290#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
291#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
292
293/* Vector indexed by INSN_UID giving the minimum clock tick at which
294   the insn becomes ready.  This is used to note timing constraints for
295   insns in the pending list.  */
296static int *insn_tick;
297#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
298
299/* Data structure for keeping track of register information
300   during that register's life.  */
301
302struct sometimes
303{
304  int regno;
305  int live_length;
306  int calls_crossed;
307};
308
309/* Forward declarations.  */
310static void add_dependence		PROTO((rtx, rtx, enum reg_note));
311static void remove_dependence		PROTO((rtx, rtx));
312static rtx find_insn_list		PROTO((rtx, rtx));
313static int insn_unit			PROTO((rtx));
314static unsigned int blockage_range	PROTO((int, rtx));
315static void clear_units			PROTO((void));
316static void prepare_unit		PROTO((int));
317static int actual_hazard_this_instance	PROTO((int, int, rtx, int, int));
318static void schedule_unit		PROTO((int, rtx, int));
319static int actual_hazard		PROTO((int, rtx, int, int));
320static int potential_hazard		PROTO((int, rtx, int));
321static int insn_cost			PROTO((rtx, rtx, rtx));
322static int priority			PROTO((rtx));
323static void free_pending_lists		PROTO((void));
324static void add_insn_mem_dependence	PROTO((rtx *, rtx *, rtx, rtx));
325static void flush_pending_lists		PROTO((rtx, int));
326static void sched_analyze_1		PROTO((rtx, rtx));
327static void sched_analyze_2		PROTO((rtx, rtx));
328static void sched_analyze_insn		PROTO((rtx, rtx, rtx));
329static int sched_analyze		PROTO((rtx, rtx));
330static void sched_note_set		PROTO((rtx, int));
331static int rank_for_schedule		PROTO((const GENERIC_PTR, const GENERIC_PTR));
332static void swap_sort			PROTO((rtx *, int));
333static void queue_insn			PROTO((rtx, int));
334static int birthing_insn_p		PROTO((rtx));
335static void adjust_priority		PROTO((rtx));
336static int schedule_insn		PROTO((rtx, rtx *, int, int));
337static int schedule_select		PROTO((rtx *, int, int, FILE *));
338static void create_reg_dead_note	PROTO((rtx, rtx));
339static void attach_deaths		PROTO((rtx, rtx, int));
340static void attach_deaths_insn		PROTO((rtx));
341static rtx unlink_notes			PROTO((rtx, rtx));
342static int new_sometimes_live		PROTO((struct sometimes *, int, int));
343static void finish_sometimes_live	PROTO((struct sometimes *, int));
344static rtx reemit_notes			PROTO((rtx, rtx));
345static void schedule_block		PROTO((int, FILE *));
346static void split_hard_reg_notes	PROTO((rtx, rtx, rtx));
347static void new_insn_dead_notes		PROTO((rtx, rtx, rtx, rtx));
348static void update_n_sets		PROTO((rtx, int));
349
350/* Main entry point of this file.  */
351void schedule_insns	PROTO((FILE *));
352
353#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
354
355/* Helper functions for instruction scheduling.  */
356
357/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
358   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
359   of dependence that this link represents.  */
360
361static void
362add_dependence (insn, elem, dep_type)
363     rtx insn;
364     rtx elem;
365     enum reg_note dep_type;
366{
367  rtx link, next;
368
369  /* Don't depend an insn on itself.  */
370  if (insn == elem)
371    return;
372
373  /* If elem is part of a sequence that must be scheduled together, then
374     make the dependence point to the last insn of the sequence.
375     When HAVE_cc0, it is possible for NOTEs to exist between users and
376     setters of the condition codes, so we must skip past notes here.
377     Otherwise, NOTEs are impossible here.  */
378
379  next = NEXT_INSN (elem);
380
381#ifdef HAVE_cc0
382  while (next && GET_CODE (next) == NOTE)
383    next = NEXT_INSN (next);
384#endif
385
386  if (next && SCHED_GROUP_P (next)
387      && GET_CODE (next) != CODE_LABEL)
388    {
389      /* Notes will never intervene here though, so don't bother checking
390	 for them.  */
391      /* We must reject CODE_LABELs, so that we don't get confused by one
392	 that has LABEL_PRESERVE_P set, which is represented by the same
393	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
394	 SCHED_GROUP_P.  */
395      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
396	     && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
397	next = NEXT_INSN (next);
398
399      /* Again, don't depend an insn on itself.  */
400      if (insn == next)
401	return;
402
403      /* Make the dependence to NEXT, the last insn of the group, instead
404	 of the original ELEM.  */
405      elem = next;
406    }
407
408  /* Check that we don't already have this dependence.  */
409  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
410    if (XEXP (link, 0) == elem)
411      {
412	/* If this is a more restrictive type of dependence than the existing
413	   one, then change the existing dependence to this type.  */
414	if ((int) dep_type < (int) REG_NOTE_KIND (link))
415	  PUT_REG_NOTE_KIND (link, dep_type);
416	return;
417      }
418  /* Might want to check one level of transitivity to save conses.  */
419
420  link = rtx_alloc (INSN_LIST);
421  /* Insn dependency, not data dependency.  */
422  PUT_REG_NOTE_KIND (link, dep_type);
423  XEXP (link, 0) = elem;
424  XEXP (link, 1) = LOG_LINKS (insn);
425  LOG_LINKS (insn) = link;
426}
427
428/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
429   of INSN.  Abort if not found.  */
430
431static void
432remove_dependence (insn, elem)
433     rtx insn;
434     rtx elem;
435{
436  rtx prev, link;
437  int found = 0;
438
439  for (prev = 0, link = LOG_LINKS (insn); link; link = XEXP (link, 1))
440    {
441      if (XEXP (link, 0) == elem)
442	{
443	  RTX_INTEGRATED_P (link) = 1;
444	  if (prev)
445	    XEXP (prev, 1) = XEXP (link, 1);
446	  else
447	    LOG_LINKS (insn) = XEXP (link, 1);
448	  found = 1;
449	}
450      else
451	prev = link;
452    }
453
454  if (! found)
455    abort ();
456  return;
457}
458
459#ifndef __GNUC__
460#define __inline
461#endif
462
463/* Computation of memory dependencies.  */
464
465/* The *_insns and *_mems are paired lists.  Each pending memory operation
466   will have a pointer to the MEM rtx on one list and a pointer to the
467   containing insn on the other list in the same place in the list.  */
468
469/* We can't use add_dependence like the old code did, because a single insn
470   may have multiple memory accesses, and hence needs to be on the list
471   once for each memory access.  Add_dependence won't let you add an insn
472   to a list more than once.  */
473
474/* An INSN_LIST containing all insns with pending read operations.  */
475static rtx pending_read_insns;
476
477/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
478static rtx pending_read_mems;
479
480/* An INSN_LIST containing all insns with pending write operations.  */
481static rtx pending_write_insns;
482
483/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
484static rtx pending_write_mems;
485
486/* Indicates the combined length of the two pending lists.  We must prevent
487   these lists from ever growing too large since the number of dependencies
488   produced is at least O(N*N), and execution time is at least O(4*N*N), as
489   a function of the length of these pending lists.  */
490
491static int pending_lists_length;
492
493/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */
494
495static rtx unused_insn_list;
496
497/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */
498
499static rtx unused_expr_list;
500
501/* The last insn upon which all memory references must depend.
502   This is an insn which flushed the pending lists, creating a dependency
503   between it and all previously pending memory references.  This creates
504   a barrier (or a checkpoint) which no memory reference is allowed to cross.
505
506   This includes all non constant CALL_INSNs.  When we do interprocedural
507   alias analysis, this restriction can be relaxed.
508   This may also be an INSN that writes memory if the pending lists grow
509   too large.  */
510
511static rtx last_pending_memory_flush;
512
513/* The last function call we have seen.  All hard regs, and, of course,
514   the last function call, must depend on this.  */
515
516static rtx last_function_call;
517
518/* The LOG_LINKS field of this is a list of insns which use a pseudo register
519   that does not already cross a call.  We create dependencies between each
520   of those insn and the next call insn, to ensure that they won't cross a call
521   after scheduling is done.  */
522
523static rtx sched_before_next_call;
524
525/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
526   so that insns independent of the last scheduled insn will be preferred
527   over dependent instructions.  */
528
529static rtx last_scheduled_insn;
530
531/* Process an insn's memory dependencies.  There are four kinds of
532   dependencies:
533
534   (0) read dependence: read follows read
535   (1) true dependence: read follows write
536   (2) anti dependence: write follows read
537   (3) output dependence: write follows write
538
539   We are careful to build only dependencies which actually exist, and
540   use transitivity to avoid building too many links.  */
541
542/* Return the INSN_LIST containing INSN in LIST, or NULL
543   if LIST does not contain INSN.  */
544
545__inline static rtx
546find_insn_list (insn, list)
547     rtx insn;
548     rtx list;
549{
550  while (list)
551    {
552      if (XEXP (list, 0) == insn)
553	return list;
554      list = XEXP (list, 1);
555    }
556  return 0;
557}
558
559/* Compute the function units used by INSN.  This caches the value
560   returned by function_units_used.  A function unit is encoded as the
561   unit number if the value is non-negative and the compliment of a
562   mask if the value is negative.  A function unit index is the
563   non-negative encoding.  */
564
565__inline static int
566insn_unit (insn)
567     rtx insn;
568{
569  register int unit = INSN_UNIT (insn);
570
571  if (unit == 0)
572    {
573      recog_memoized (insn);
574
575      /* A USE insn, or something else we don't need to understand.
576	 We can't pass these directly to function_units_used because it will
577	 trigger a fatal error for unrecognizable insns.  */
578      if (INSN_CODE (insn) < 0)
579	unit = -1;
580      else
581	{
582	  unit = function_units_used (insn);
583	  /* Increment non-negative values so we can cache zero.  */
584	  if (unit >= 0) unit++;
585	}
586      /* We only cache 16 bits of the result, so if the value is out of
587	 range, don't cache it.  */
588      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
589	  || unit >= 0
590	  || (unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
591      INSN_UNIT (insn) = unit;
592    }
593  return (unit > 0 ? unit - 1 : unit);
594}
595
596/* Compute the blockage range for executing INSN on UNIT.  This caches
597   the value returned by the blockage_range_function for the unit.
598   These values are encoded in an int where the upper half gives the
599   minimum value and the lower half gives the maximum value.  */
600
601__inline static unsigned int
602blockage_range (unit, insn)
603     int unit;
604     rtx insn;
605{
606  unsigned int blockage = INSN_BLOCKAGE (insn);
607  unsigned int range;
608
609  if ((int) UNIT_BLOCKED (blockage) != unit + 1)
610    {
611      range = function_units[unit].blockage_range_function (insn);
612      /* We only cache the blockage range for one unit and then only if
613	 the values fit.  */
614      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
615	INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
616    }
617  else
618    range = BLOCKAGE_RANGE (blockage);
619
620  return range;
621}
622
623/* A vector indexed by function unit instance giving the last insn to use
624   the unit.  The value of the function unit instance index for unit U
625   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
626static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
627
628/* A vector indexed by function unit instance giving the minimum time when
629   the unit will unblock based on the maximum blockage cost.  */
630static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
631
632/* A vector indexed by function unit number giving the number of insns
633   that remain to use the unit.  */
634static int unit_n_insns[FUNCTION_UNITS_SIZE];
635
636/* Reset the function unit state to the null state.  */
637
638static void
639clear_units ()
640{
641  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
642  bzero ((char *) unit_tick, sizeof (unit_tick));
643  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
644}
645
646/* Record an insn as one that will use the units encoded by UNIT.  */
647
648__inline static void
649prepare_unit (unit)
650     int unit;
651{
652  int i;
653
654  if (unit >= 0)
655    unit_n_insns[unit]++;
656  else
657    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
658      if ((unit & 1) != 0)
659	prepare_unit (i);
660}
661
662/* Return the actual hazard cost of executing INSN on the unit UNIT,
663   instance INSTANCE at time CLOCK if the previous actual hazard cost
664   was COST.  */
665
666__inline static int
667actual_hazard_this_instance (unit, instance, insn, clock, cost)
668     int unit, instance, clock, cost;
669     rtx insn;
670{
671  int tick = unit_tick[instance];
672
673  if (tick - clock > cost)
674    {
675      /* The scheduler is operating in reverse, so INSN is the executing
676	 insn and the unit's last insn is the candidate insn.  We want a
677	 more exact measure of the blockage if we execute INSN at CLOCK
678	 given when we committed the execution of the unit's last insn.
679
680	 The blockage value is given by either the unit's max blockage
681	 constant, blockage range function, or blockage function.  Use
682	 the most exact form for the given unit.  */
683
684      if (function_units[unit].blockage_range_function)
685	{
686	  if (function_units[unit].blockage_function)
687	    tick += (function_units[unit].blockage_function
688		     (insn, unit_last_insn[instance])
689		     - function_units[unit].max_blockage);
690	  else
691	    tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
692		     - function_units[unit].max_blockage);
693	}
694      if (tick - clock > cost)
695	cost = tick - clock;
696    }
697  return cost;
698}
699
700/* Record INSN as having begun execution on the units encoded by UNIT at
701   time CLOCK.  */
702
703__inline static void
704schedule_unit (unit, insn, clock)
705     int unit, clock;
706     rtx insn;
707{
708  int i;
709
710  if (unit >= 0)
711    {
712      int instance = unit;
713#if MAX_MULTIPLICITY > 1
714      /* Find the first free instance of the function unit and use that
715	 one.  We assume that one is free.  */
716      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
717	{
718	  if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
719	    break;
720	  instance += FUNCTION_UNITS_SIZE;
721	}
722#endif
723      unit_last_insn[instance] = insn;
724      unit_tick[instance] = (clock + function_units[unit].max_blockage);
725    }
726  else
727    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
728      if ((unit & 1) != 0)
729	schedule_unit (i, insn, clock);
730}
731
732/* Return the actual hazard cost of executing INSN on the units encoded by
733   UNIT at time CLOCK if the previous actual hazard cost was COST.  */
734
735__inline static int
736actual_hazard (unit, insn, clock, cost)
737     int unit, clock, cost;
738     rtx insn;
739{
740  int i;
741
742  if (unit >= 0)
743    {
744      /* Find the instance of the function unit with the minimum hazard.  */
745      int instance = unit;
746      int best_cost = actual_hazard_this_instance (unit, instance, insn,
747						   clock, cost);
748#if MAX_MULTIPLICITY > 1
749      int this_cost;
750
751      if (best_cost > cost)
752	{
753	  for (i = function_units[unit].multiplicity - 1; i > 0; i--)
754	    {
755	      instance += FUNCTION_UNITS_SIZE;
756	      this_cost = actual_hazard_this_instance (unit, instance, insn,
757						       clock, cost);
758	      if (this_cost < best_cost)
759		{
760		  best_cost = this_cost;
761		  if (this_cost <= cost)
762		    break;
763		}
764	    }
765	}
766#endif
767      cost = MAX (cost, best_cost);
768    }
769  else
770    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
771      if ((unit & 1) != 0)
772	cost = actual_hazard (i, insn, clock, cost);
773
774  return cost;
775}
776
777/* Return the potential hazard cost of executing an instruction on the
778   units encoded by UNIT if the previous potential hazard cost was COST.
779   An insn with a large blockage time is chosen in preference to one
780   with a smaller time; an insn that uses a unit that is more likely
781   to be used is chosen in preference to one with a unit that is less
782   used.  We are trying to minimize a subsequent actual hazard.  */
783
784__inline static int
785potential_hazard (unit, insn, cost)
786     int unit, cost;
787     rtx insn;
788{
789  int i, ncost;
790  unsigned int minb, maxb;
791
792  if (unit >= 0)
793    {
794      minb = maxb = function_units[unit].max_blockage;
795      if (maxb > 1)
796	{
797	  if (function_units[unit].blockage_range_function)
798	    {
799	      maxb = minb = blockage_range (unit, insn);
800	      maxb = MAX_BLOCKAGE_COST (maxb);
801	      minb = MIN_BLOCKAGE_COST (minb);
802	    }
803
804	  if (maxb > 1)
805	    {
806	      /* Make the number of instructions left dominate.  Make the
807		 minimum delay dominate the maximum delay.  If all these
808		 are the same, use the unit number to add an arbitrary
809		 ordering.  Other terms can be added.  */
810	      ncost = minb * 0x40 + maxb;
811	      ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
812	      if (ncost > cost)
813		cost = ncost;
814	    }
815	}
816    }
817  else
818    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
819      if ((unit & 1) != 0)
820	cost = potential_hazard (i, insn, cost);
821
822  return cost;
823}
824
825/* Compute cost of executing INSN given the dependence LINK on the insn USED.
826   This is the number of virtual cycles taken between instruction issue and
827   instruction results.  */
828
829__inline static int
830insn_cost (insn, link, used)
831     rtx insn, link, used;
832{
833  register int cost = INSN_COST (insn);
834
835  if (cost == 0)
836    {
837      recog_memoized (insn);
838
839      /* A USE insn, or something else we don't need to understand.
840	 We can't pass these directly to result_ready_cost because it will
841	 trigger a fatal error for unrecognizable insns.  */
842      if (INSN_CODE (insn) < 0)
843	{
844	  INSN_COST (insn) = 1;
845	  return 1;
846	}
847      else
848	{
849	  cost = result_ready_cost (insn);
850
851	  if (cost < 1)
852	    cost = 1;
853
854	  INSN_COST (insn) = cost;
855	}
856    }
857
858  /* A USE insn should never require the value used to be computed.  This
859     allows the computation of a function's result and parameter values to
860     overlap the return and call.  */
861  recog_memoized (used);
862  if (INSN_CODE (used) < 0)
863    LINK_COST_FREE (link) = 1;
864
865  /* If some dependencies vary the cost, compute the adjustment.  Most
866     commonly, the adjustment is complete: either the cost is ignored
867     (in the case of an output- or anti-dependence), or the cost is
868     unchanged.  These values are cached in the link as LINK_COST_FREE
869     and LINK_COST_ZERO.  */
870
871  if (LINK_COST_FREE (link))
872    cost = 1;
873#ifdef ADJUST_COST
874  else if (! LINK_COST_ZERO (link))
875    {
876      int ncost = cost;
877
878      ADJUST_COST (used, link, insn, ncost);
879      if (ncost <= 1)
880	LINK_COST_FREE (link) = ncost = 1;
881      if (cost == ncost)
882	LINK_COST_ZERO (link) = 1;
883      cost = ncost;
884    }
885#endif
886  return cost;
887}
888
889/* Compute the priority number for INSN.  */
890
891static int
892priority (insn)
893     rtx insn;
894{
895  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
896    {
897      int prev_priority;
898      int max_priority;
899      int this_priority = INSN_PRIORITY (insn);
900      rtx prev;
901
902      if (this_priority > 0)
903	return this_priority;
904
905      max_priority = 1;
906
907      /* Nonzero if these insns must be scheduled together.  */
908      if (SCHED_GROUP_P (insn))
909	{
910	  prev = insn;
911	  while (SCHED_GROUP_P (prev))
912	    {
913	      prev = PREV_INSN (prev);
914	      INSN_REF_COUNT (prev) += 1;
915	    }
916	}
917
918      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
919	{
920	  rtx x = XEXP (prev, 0);
921
922	  /* If this was a duplicate of a dependence we already deleted,
923	     ignore it.  */
924	  if (RTX_INTEGRATED_P (prev))
925	    continue;
926
927	  /* A dependence pointing to a note or deleted insn is always
928	     obsolete, because sched_analyze_insn will have created any
929	     necessary new dependences which replace it.  Notes and deleted
930	     insns can be created when instructions are deleted by insn
931	     splitting, or by register allocation.  */
932	  if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
933	    {
934	      remove_dependence (insn, x);
935	      continue;
936	    }
937
938	  /* Clear the link cost adjustment bits.  */
939	  LINK_COST_FREE (prev) = 0;
940#ifdef ADJUST_COST
941	  LINK_COST_ZERO (prev) = 0;
942#endif
943
944	  /* This priority calculation was chosen because it results in the
945	     least instruction movement, and does not hurt the performance
946	     of the resulting code compared to the old algorithm.
947	     This makes the sched algorithm more stable, which results
948	     in better code, because there is less register pressure,
949	     cross jumping is more likely to work, and debugging is easier.
950
951	     When all instructions have a latency of 1, there is no need to
952	     move any instructions.  Subtracting one here ensures that in such
953	     cases all instructions will end up with a priority of one, and
954	     hence no scheduling will be done.
955
956	     The original code did not subtract the one, and added the
957	     insn_cost of the current instruction to its priority (e.g.
958	     move the insn_cost call down to the end).  */
959
960	  prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
961
962	  if (prev_priority > max_priority)
963	    max_priority = prev_priority;
964	  INSN_REF_COUNT (x) += 1;
965	}
966
967      prepare_unit (insn_unit (insn));
968      INSN_PRIORITY (insn) = max_priority;
969      return INSN_PRIORITY (insn);
970    }
971  return 0;
972}
973
974/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
975   them to the unused_*_list variables, so that they can be reused.  */
976
977static void
978free_pending_lists ()
979{
980  register rtx link, prev_link;
981
982  if (pending_read_insns)
983    {
984      prev_link = pending_read_insns;
985      link = XEXP (prev_link, 1);
986
987      while (link)
988	{
989	  prev_link = link;
990	  link = XEXP (link, 1);
991	}
992
993      XEXP (prev_link, 1) = unused_insn_list;
994      unused_insn_list = pending_read_insns;
995      pending_read_insns = 0;
996    }
997
998  if (pending_write_insns)
999    {
1000      prev_link = pending_write_insns;
1001      link = XEXP (prev_link, 1);
1002
1003      while (link)
1004	{
1005	  prev_link = link;
1006	  link = XEXP (link, 1);
1007	}
1008
1009      XEXP (prev_link, 1) = unused_insn_list;
1010      unused_insn_list = pending_write_insns;
1011      pending_write_insns = 0;
1012    }
1013
1014  if (pending_read_mems)
1015    {
1016      prev_link = pending_read_mems;
1017      link = XEXP (prev_link, 1);
1018
1019      while (link)
1020	{
1021	  prev_link = link;
1022	  link = XEXP (link, 1);
1023	}
1024
1025      XEXP (prev_link, 1) = unused_expr_list;
1026      unused_expr_list = pending_read_mems;
1027      pending_read_mems = 0;
1028    }
1029
1030  if (pending_write_mems)
1031    {
1032      prev_link = pending_write_mems;
1033      link = XEXP (prev_link, 1);
1034
1035      while (link)
1036	{
1037	  prev_link = link;
1038	  link = XEXP (link, 1);
1039	}
1040
1041      XEXP (prev_link, 1) = unused_expr_list;
1042      unused_expr_list = pending_write_mems;
1043      pending_write_mems = 0;
1044    }
1045}
1046
1047/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1048   The MEM is a memory reference contained within INSN, which we are saving
1049   so that we can do memory aliasing on it.  */
1050
1051static void
1052add_insn_mem_dependence (insn_list, mem_list, insn, mem)
1053     rtx *insn_list, *mem_list, insn, mem;
1054{
1055  register rtx link;
1056
1057  if (unused_insn_list)
1058    {
1059      link = unused_insn_list;
1060      unused_insn_list = XEXP (link, 1);
1061    }
1062  else
1063    link = rtx_alloc (INSN_LIST);
1064  XEXP (link, 0) = insn;
1065  XEXP (link, 1) = *insn_list;
1066  *insn_list = link;
1067
1068  if (unused_expr_list)
1069    {
1070      link = unused_expr_list;
1071      unused_expr_list = XEXP (link, 1);
1072    }
1073  else
1074    link = rtx_alloc (EXPR_LIST);
1075  XEXP (link, 0) = mem;
1076  XEXP (link, 1) = *mem_list;
1077  *mem_list = link;
1078
1079  pending_lists_length++;
1080}
1081
1082/* Make a dependency between every memory reference on the pending lists
1083   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
1084   the read list.  */
1085
1086static void
1087flush_pending_lists (insn, only_write)
1088     rtx insn;
1089     int only_write;
1090{
1091  rtx link;
1092
1093  while (pending_read_insns && ! only_write)
1094    {
1095      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
1096
1097      link = pending_read_insns;
1098      pending_read_insns = XEXP (pending_read_insns, 1);
1099      XEXP (link, 1) = unused_insn_list;
1100      unused_insn_list = link;
1101
1102      link = pending_read_mems;
1103      pending_read_mems = XEXP (pending_read_mems, 1);
1104      XEXP (link, 1) = unused_expr_list;
1105      unused_expr_list = link;
1106    }
1107  while (pending_write_insns)
1108    {
1109      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
1110
1111      link = pending_write_insns;
1112      pending_write_insns = XEXP (pending_write_insns, 1);
1113      XEXP (link, 1) = unused_insn_list;
1114      unused_insn_list = link;
1115
1116      link = pending_write_mems;
1117      pending_write_mems = XEXP (pending_write_mems, 1);
1118      XEXP (link, 1) = unused_expr_list;
1119      unused_expr_list = link;
1120    }
1121  pending_lists_length = 0;
1122
1123  if (last_pending_memory_flush)
1124    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1125
1126  last_pending_memory_flush = insn;
1127}
1128
1129/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
1130   by the write to the destination of X, and reads of everything mentioned.  */
1131
1132static void
1133sched_analyze_1 (x, insn)
1134     rtx x;
1135     rtx insn;
1136{
1137  register int regno;
1138  register rtx dest = SET_DEST (x);
1139
1140  if (dest == 0)
1141    return;
1142
1143  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
1144	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1145    {
1146      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
1147	{
1148	  /* The second and third arguments are values read by this insn.  */
1149	  sched_analyze_2 (XEXP (dest, 1), insn);
1150	  sched_analyze_2 (XEXP (dest, 2), insn);
1151	}
1152      dest = SUBREG_REG (dest);
1153    }
1154
1155  if (GET_CODE (dest) == REG)
1156    {
1157      register int i;
1158
1159      regno = REGNO (dest);
1160
1161      /* A hard reg in a wide mode may really be multiple registers.
1162	 If so, mark all of them just like the first.  */
1163      if (regno < FIRST_PSEUDO_REGISTER)
1164	{
1165	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
1166	  while (--i >= 0)
1167	    {
1168	      rtx u;
1169
1170	      for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
1171		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1172	      reg_last_uses[regno + i] = 0;
1173	      if (reg_last_sets[regno + i])
1174		add_dependence (insn, reg_last_sets[regno + i],
1175				REG_DEP_OUTPUT);
1176	      SET_REGNO_REG_SET (reg_pending_sets, regno + i);
1177	      if ((call_used_regs[i] || global_regs[i])
1178		  && last_function_call)
1179		/* Function calls clobber all call_used regs.  */
1180		add_dependence (insn, last_function_call, REG_DEP_ANTI);
1181	    }
1182	}
1183      else
1184	{
1185	  rtx u;
1186
1187	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
1188	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1189	  reg_last_uses[regno] = 0;
1190	  if (reg_last_sets[regno])
1191	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
1192	  SET_REGNO_REG_SET (reg_pending_sets, regno);
1193
1194	  /* Pseudos that are REG_EQUIV to something may be replaced
1195	     by that during reloading.  We need only add dependencies for
1196	     the address in the REG_EQUIV note.  */
1197	  if (! reload_completed
1198	      && reg_known_equiv_p[regno]
1199	      && GET_CODE (reg_known_value[regno]) == MEM)
1200	    sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
1201
1202	  /* Don't let it cross a call after scheduling if it doesn't
1203	     already cross one.  */
1204	  if (REG_N_CALLS_CROSSED (regno) == 0 && last_function_call)
1205	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
1206	}
1207    }
1208  else if (GET_CODE (dest) == MEM)
1209    {
1210      /* Writing memory.  */
1211
1212      if (pending_lists_length > 32)
1213	{
1214	  /* Flush all pending reads and writes to prevent the pending lists
1215	     from getting any larger.  Insn scheduling runs too slowly when
1216	     these lists get long.  The number 32 was chosen because it
1217	     seems like a reasonable number.  When compiling GCC with itself,
1218	     this flush occurs 8 times for sparc, and 10 times for m88k using
1219	     the number 32.  */
1220	  flush_pending_lists (insn, 0);
1221	}
1222      else
1223	{
1224	  rtx pending, pending_mem;
1225
1226	  pending = pending_read_insns;
1227	  pending_mem = pending_read_mems;
1228	  while (pending)
1229	    {
1230	      /* If a dependency already exists, don't create a new one.  */
1231	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1232		if (anti_dependence (XEXP (pending_mem, 0), dest))
1233		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1234
1235	      pending = XEXP (pending, 1);
1236	      pending_mem = XEXP (pending_mem, 1);
1237	    }
1238
1239	  pending = pending_write_insns;
1240	  pending_mem = pending_write_mems;
1241	  while (pending)
1242	    {
1243	      /* If a dependency already exists, don't create a new one.  */
1244	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1245		if (output_dependence (XEXP (pending_mem, 0), dest))
1246		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1247
1248	      pending = XEXP (pending, 1);
1249	      pending_mem = XEXP (pending_mem, 1);
1250	    }
1251
1252	  if (last_pending_memory_flush)
1253	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1254
1255	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
1256				   insn, dest);
1257	}
1258      sched_analyze_2 (XEXP (dest, 0), insn);
1259    }
1260
1261  /* Analyze reads.  */
1262  if (GET_CODE (x) == SET)
1263    sched_analyze_2 (SET_SRC (x), insn);
1264}
1265
1266/* Analyze the uses of memory and registers in rtx X in INSN.  */
1267
1268static void
1269sched_analyze_2 (x, insn)
1270     rtx x;
1271     rtx insn;
1272{
1273  register int i;
1274  register int j;
1275  register enum rtx_code code;
1276  register char *fmt;
1277
1278  if (x == 0)
1279    return;
1280
1281  code = GET_CODE (x);
1282
1283  switch (code)
1284    {
1285    case CONST_INT:
1286    case CONST_DOUBLE:
1287    case SYMBOL_REF:
1288    case CONST:
1289    case LABEL_REF:
1290      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
1291	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1292	 this does not mean that this insn is using cc0.  */
1293      return;
1294
1295#ifdef HAVE_cc0
1296    case CC0:
1297      {
1298	rtx link, prev;
1299
1300	/* User of CC0 depends on immediately preceding insn.  */
1301	SCHED_GROUP_P (insn) = 1;
1302
1303	/* There may be a note before this insn now, but all notes will
1304	   be removed before we actually try to schedule the insns, so
1305	   it won't cause a problem later.  We must avoid it here though.  */
1306	prev = prev_nonnote_insn (insn);
1307
1308	/* Make a copy of all dependencies on the immediately previous insn,
1309	   and add to this insn.  This is so that all the dependencies will
1310	   apply to the group.  Remove an explicit dependence on this insn
1311	   as SCHED_GROUP_P now represents it.  */
1312
1313	if (find_insn_list (prev, LOG_LINKS (insn)))
1314	  remove_dependence (insn, prev);
1315
1316	for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1317	  add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1318
1319	return;
1320      }
1321#endif
1322
1323    case REG:
1324      {
1325	int regno = REGNO (x);
1326	if (regno < FIRST_PSEUDO_REGISTER)
1327	  {
1328	    int i;
1329
1330	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1331	    while (--i >= 0)
1332	      {
1333		reg_last_uses[regno + i]
1334		  = gen_rtx_INSN_LIST (VOIDmode,
1335				       insn, reg_last_uses[regno + i]);
1336		if (reg_last_sets[regno + i])
1337		  add_dependence (insn, reg_last_sets[regno + i], 0);
1338		if ((call_used_regs[regno + i] || global_regs[regno + i])
1339		    && last_function_call)
1340		  /* Function calls clobber all call_used regs.  */
1341		  add_dependence (insn, last_function_call, REG_DEP_ANTI);
1342	      }
1343	  }
1344	else
1345	  {
1346	    reg_last_uses[regno]
1347	      = gen_rtx_INSN_LIST (VOIDmode, insn, reg_last_uses[regno]);
1348	    if (reg_last_sets[regno])
1349	      add_dependence (insn, reg_last_sets[regno], 0);
1350
1351	    /* Pseudos that are REG_EQUIV to something may be replaced
1352	       by that during reloading.  We need only add dependencies for
1353	       the address in the REG_EQUIV note.  */
1354	    if (! reload_completed
1355		&& reg_known_equiv_p[regno]
1356		&& GET_CODE (reg_known_value[regno]) == MEM)
1357	      sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
1358
1359	    /* If the register does not already cross any calls, then add this
1360	       insn to the sched_before_next_call list so that it will still
1361	       not cross calls after scheduling.  */
1362	    if (REG_N_CALLS_CROSSED (regno) == 0)
1363	      add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1364	  }
1365	return;
1366      }
1367
1368    case MEM:
1369      {
1370	/* Reading memory.  */
1371
1372	rtx pending, pending_mem;
1373
1374	pending = pending_read_insns;
1375	pending_mem = pending_read_mems;
1376	while (pending)
1377	  {
1378	    /* If a dependency already exists, don't create a new one.  */
1379	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1380	      if (read_dependence (XEXP (pending_mem, 0), x))
1381		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1382
1383	    pending = XEXP (pending, 1);
1384	    pending_mem = XEXP (pending_mem, 1);
1385	  }
1386
1387	pending = pending_write_insns;
1388	pending_mem = pending_write_mems;
1389	while (pending)
1390	  {
1391	    /* If a dependency already exists, don't create a new one.  */
1392	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1393	      if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
1394				   x, rtx_varies_p))
1395		add_dependence (insn, XEXP (pending, 0), 0);
1396
1397	    pending = XEXP (pending, 1);
1398	    pending_mem = XEXP (pending_mem, 1);
1399	  }
1400	if (last_pending_memory_flush)
1401	  add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1402
1403	/* Always add these dependencies to pending_reads, since
1404	   this insn may be followed by a write.  */
1405	add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1406				 insn, x);
1407
1408	/* Take advantage of tail recursion here.  */
1409	sched_analyze_2 (XEXP (x, 0), insn);
1410	return;
1411      }
1412
1413    case ASM_OPERANDS:
1414    case ASM_INPUT:
1415    case UNSPEC_VOLATILE:
1416    case TRAP_IF:
1417      {
1418	rtx u;
1419
1420	/* Traditional and volatile asm instructions must be considered to use
1421	   and clobber all hard registers, all pseudo-registers and all of
1422	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
1423
1424	   Consider for instance a volatile asm that changes the fpu rounding
1425	   mode.  An insn should not be moved across this even if it only uses
1426	   pseudo-regs because it might give an incorrectly rounded result.  */
1427	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1428	  {
1429	    int max_reg = max_reg_num ();
1430	    for (i = 0; i < max_reg; i++)
1431	      {
1432		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1433		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1434		reg_last_uses[i] = 0;
1435		if (reg_last_sets[i])
1436		  add_dependence (insn, reg_last_sets[i], 0);
1437	      }
1438	    reg_pending_sets_all = 1;
1439
1440	    flush_pending_lists (insn, 0);
1441	  }
1442
1443	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
1444	   We can not just fall through here since then we would be confused
1445	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
1446	   traditional asms unlike their normal usage.  */
1447
1448	if (code == ASM_OPERANDS)
1449	  {
1450	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1451	      sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1452	    return;
1453	  }
1454	break;
1455      }
1456
1457    case PRE_DEC:
1458    case POST_DEC:
1459    case PRE_INC:
1460    case POST_INC:
1461      /* These both read and modify the result.  We must handle them as writes
1462	 to get proper dependencies for following instructions.  We must handle
1463	 them as reads to get proper dependencies from this to previous
1464	 instructions.  Thus we need to pass them to both sched_analyze_1
1465	 and sched_analyze_2.  We must call sched_analyze_2 first in order
1466	 to get the proper antecedent for the read.  */
1467      sched_analyze_2 (XEXP (x, 0), insn);
1468      sched_analyze_1 (x, insn);
1469      return;
1470
1471    default:
1472      break;
1473    }
1474
1475  /* Other cases: walk the insn.  */
1476  fmt = GET_RTX_FORMAT (code);
1477  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1478    {
1479      if (fmt[i] == 'e')
1480	sched_analyze_2 (XEXP (x, i), insn);
1481      else if (fmt[i] == 'E')
1482	for (j = 0; j < XVECLEN (x, i); j++)
1483	  sched_analyze_2 (XVECEXP (x, i, j), insn);
1484    }
1485}
1486
1487/* Analyze an INSN with pattern X to find all dependencies.  */
1488
1489static void
1490sched_analyze_insn (x, insn, loop_notes)
1491     rtx x, insn;
1492     rtx loop_notes;
1493{
1494  register RTX_CODE code = GET_CODE (x);
1495  rtx link;
1496  int maxreg = max_reg_num ();
1497  int i;
1498
1499  if (code == SET || code == CLOBBER)
1500    sched_analyze_1 (x, insn);
1501  else if (code == PARALLEL)
1502    {
1503      register int i;
1504      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1505	{
1506	  code = GET_CODE (XVECEXP (x, 0, i));
1507	  if (code == SET || code == CLOBBER)
1508	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
1509	  else
1510	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
1511	}
1512    }
1513  else
1514    sched_analyze_2 (x, insn);
1515
1516  /* Mark registers CLOBBERED or used by called function.  */
1517  if (GET_CODE (insn) == CALL_INSN)
1518    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1519      {
1520	if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1521	  sched_analyze_1 (XEXP (link, 0), insn);
1522	else
1523	  sched_analyze_2 (XEXP (link, 0), insn);
1524      }
1525
1526  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic block, then
1527     we must be sure that no instructions are scheduled across it.
1528     Otherwise, the reg_n_refs info (which depends on loop_depth) would
1529     become incorrect.  */
1530
1531  if (loop_notes)
1532    {
1533      int max_reg = max_reg_num ();
1534      rtx link;
1535
1536      for (i = 0; i < max_reg; i++)
1537	{
1538	  rtx u;
1539	  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1540	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1541	  reg_last_uses[i] = 0;
1542	  if (reg_last_sets[i])
1543	    add_dependence (insn, reg_last_sets[i], 0);
1544	}
1545      reg_pending_sets_all = 1;
1546
1547      flush_pending_lists (insn, 0);
1548
1549      link = loop_notes;
1550      while (XEXP (link, 1))
1551	link = XEXP (link, 1);
1552      XEXP (link, 1) = REG_NOTES (insn);
1553      REG_NOTES (insn) = loop_notes;
1554    }
1555
1556  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1557			     {
1558			       reg_last_sets[i] = insn;
1559			     });
1560  CLEAR_REG_SET (reg_pending_sets);
1561
1562  if (reg_pending_sets_all)
1563    {
1564      for (i = 0; i < maxreg; i++)
1565	reg_last_sets[i] = insn;
1566      reg_pending_sets_all = 0;
1567    }
1568
1569  /* Handle function calls and function returns created by the epilogue
1570     threading code.  */
1571  if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
1572    {
1573      rtx dep_insn;
1574      rtx prev_dep_insn;
1575
1576      /* When scheduling instructions, we make sure calls don't lose their
1577	 accompanying USE insns by depending them one on another in order.
1578
1579	 Also, we must do the same thing for returns created by the epilogue
1580	 threading code.  Note this code works only in this special case,
1581	 because other passes make no guarantee that they will never emit
1582	 an instruction between a USE and a RETURN.  There is such a guarantee
1583	 for USE instructions immediately before a call.  */
1584
1585      prev_dep_insn = insn;
1586      dep_insn = PREV_INSN (insn);
1587      while (GET_CODE (dep_insn) == INSN
1588	     && GET_CODE (PATTERN (dep_insn)) == USE
1589	     && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
1590	{
1591	  SCHED_GROUP_P (prev_dep_insn) = 1;
1592
1593	  /* Make a copy of all dependencies on dep_insn, and add to insn.
1594	     This is so that all of the dependencies will apply to the
1595	     group.  */
1596
1597	  for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1598	    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1599
1600	  prev_dep_insn = dep_insn;
1601	  dep_insn = PREV_INSN (dep_insn);
1602	}
1603    }
1604}
1605
1606/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1607   for every dependency.  */
1608
1609static int
1610sched_analyze (head, tail)
1611     rtx head, tail;
1612{
1613  register rtx insn;
1614  register int n_insns = 0;
1615  register rtx u;
1616  register int luid = 0;
1617  rtx loop_notes = 0;
1618
1619  for (insn = head; ; insn = NEXT_INSN (insn))
1620    {
1621      INSN_LUID (insn) = luid++;
1622
1623      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1624	{
1625	  sched_analyze_insn (PATTERN (insn), insn, loop_notes);
1626	  loop_notes = 0;
1627	  n_insns += 1;
1628	}
1629      else if (GET_CODE (insn) == CALL_INSN)
1630	{
1631	  rtx x;
1632	  register int i;
1633
1634	  /* Any instruction using a hard register which may get clobbered
1635	     by a call needs to be marked as dependent on this call.
1636	     This prevents a use of a hard return reg from being moved
1637	     past a void call (i.e. it does not explicitly set the hard
1638	     return reg).  */
1639
1640	  /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
1641	     all registers, not just hard registers, may be clobbered by this
1642	     call.  */
1643
1644	  /* Insn, being a CALL_INSN, magically depends on
1645	     `last_function_call' already.  */
1646
1647	  if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
1648	      && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
1649	    {
1650	      int max_reg = max_reg_num ();
1651	      for (i = 0; i < max_reg; i++)
1652		{
1653		  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1654		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1655		  reg_last_uses[i] = 0;
1656		  if (reg_last_sets[i])
1657		    add_dependence (insn, reg_last_sets[i], 0);
1658		}
1659	      reg_pending_sets_all = 1;
1660
1661	      /* Add a pair of fake REG_NOTEs which we will later
1662		 convert back into a NOTE_INSN_SETJMP note.  See
1663		 reemit_notes for why we use a pair of NOTEs.  */
1664
1665	      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1666						    GEN_INT (0),
1667						    REG_NOTES (insn));
1668	      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
1669						    GEN_INT (NOTE_INSN_SETJMP),
1670						    REG_NOTES (insn));
1671	    }
1672	  else
1673	    {
1674	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1675		if (call_used_regs[i] || global_regs[i])
1676		  {
1677		    for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1678		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1679		    reg_last_uses[i] = 0;
1680		    if (reg_last_sets[i])
1681		      add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
1682		    SET_REGNO_REG_SET (reg_pending_sets, i);
1683		  }
1684	    }
1685
1686	  /* For each insn which shouldn't cross a call, add a dependence
1687	     between that insn and this call insn.  */
1688	  x = LOG_LINKS (sched_before_next_call);
1689	  while (x)
1690	    {
1691	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
1692	      x = XEXP (x, 1);
1693	    }
1694	  LOG_LINKS (sched_before_next_call) = 0;
1695
1696	  sched_analyze_insn (PATTERN (insn), insn, loop_notes);
1697	  loop_notes = 0;
1698
1699	  /* In the absence of interprocedural alias analysis, we must flush
1700	     all pending reads and writes, and start new dependencies starting
1701	     from here.  But only flush writes for constant calls (which may
1702	     be passed a pointer to something we haven't written yet).  */
1703	  flush_pending_lists (insn, CONST_CALL_P (insn));
1704
1705	  /* Depend this function call (actually, the user of this
1706	     function call) on all hard register clobberage.  */
1707	  last_function_call = insn;
1708	  n_insns += 1;
1709	}
1710
1711      /* See comments on reemit_notes as to why we do this.  */
1712      else if (GET_CODE (insn) == NOTE
1713	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1714		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1715		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1716		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
1717		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
1718		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END
1719		   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
1720		       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
1721	{
1722	  loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1723					  GEN_INT (NOTE_BLOCK_NUMBER (insn)),
1724					  loop_notes);
1725	  loop_notes = gen_rtx_EXPR_LIST (REG_DEAD,
1726					  GEN_INT (NOTE_LINE_NUMBER (insn)),
1727					  loop_notes);
1728	  CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
1729	}
1730
1731      if (insn == tail)
1732	return n_insns;
1733    }
1734
1735  abort ();
1736}
1737
1738/* Called when we see a set of a register.  If death is true, then we are
1739   scanning backwards.  Mark that register as unborn.  If nobody says
1740   otherwise, that is how things will remain.  If death is false, then we
1741   are scanning forwards.  Mark that register as being born.  */
1742
1743static void
1744sched_note_set (x, death)
1745     rtx x;
1746     int death;
1747{
1748  register int regno;
1749  register rtx reg = SET_DEST (x);
1750  int subreg_p = 0;
1751
1752  if (reg == 0)
1753    return;
1754
1755  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
1756	 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
1757    {
1758      /* Must treat modification of just one hardware register of a multi-reg
1759	 value or just a byte field of a register exactly the same way that
1760	 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
1761	 does not kill the entire register.  */
1762      if (GET_CODE (reg) != SUBREG
1763	  || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
1764	subreg_p = 1;
1765
1766      reg = SUBREG_REG (reg);
1767    }
1768
1769  if (GET_CODE (reg) != REG)
1770    return;
1771
1772  /* Global registers are always live, so the code below does not apply
1773     to them.  */
1774
1775  regno = REGNO (reg);
1776  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
1777    {
1778      if (death)
1779	{
1780	  /* If we only set part of the register, then this set does not
1781	     kill it.  */
1782	  if (subreg_p)
1783	    return;
1784
1785	  /* Try killing this register.  */
1786	  if (regno < FIRST_PSEUDO_REGISTER)
1787	    {
1788	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1789	      while (--j >= 0)
1790		{
1791		  CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
1792		  SET_REGNO_REG_SET (bb_dead_regs, regno + j);
1793		}
1794	    }
1795	  else
1796	    {
1797	      CLEAR_REGNO_REG_SET (bb_live_regs, regno);
1798	      SET_REGNO_REG_SET (bb_dead_regs, regno);
1799	    }
1800	}
1801      else
1802	{
1803	  /* Make the register live again.  */
1804	  if (regno < FIRST_PSEUDO_REGISTER)
1805	    {
1806	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1807	      while (--j >= 0)
1808		{
1809		  SET_REGNO_REG_SET (bb_live_regs, regno + j);
1810		  CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
1811		}
1812	    }
1813	  else
1814	    {
1815	      SET_REGNO_REG_SET (bb_live_regs, regno);
1816	      CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
1817	    }
1818	}
1819    }
1820}
1821
1822/* Macros and functions for keeping the priority queue sorted, and
1823   dealing with queueing and dequeueing of instructions.  */
1824
1825#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
1826  do { if ((NEW_READY) - (OLD_READY) == 1)				\
1827	 swap_sort (READY, NEW_READY);					\
1828       else if ((NEW_READY) - (OLD_READY) > 1)				\
1829	 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); }	\
1830  while (0)
1831
1832/* Returns a positive value if y is preferred; returns a negative value if
1833   x is preferred.  Should never return 0, since that will make the sort
1834   unstable.  */
1835
1836static int
1837rank_for_schedule (x, y)
1838     const GENERIC_PTR x;
1839     const GENERIC_PTR y;
1840{
1841  rtx tmp = *(rtx *)y;
1842  rtx tmp2 = *(rtx *)x;
1843  rtx link;
1844  int tmp_class, tmp2_class;
1845  int value;
1846
1847  /* Choose the instruction with the highest priority, if different.  */
1848  if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)))
1849    return value;
1850
1851  if (last_scheduled_insn)
1852    {
1853      /* Classify the instructions into three classes:
1854	 1) Data dependent on last schedule insn.
1855	 2) Anti/Output dependent on last scheduled insn.
1856	 3) Independent of last scheduled insn, or has latency of one.
1857	 Choose the insn from the highest numbered class if different.  */
1858      link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
1859      if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
1860	tmp_class = 3;
1861      else if (REG_NOTE_KIND (link) == 0) /* Data dependence.  */
1862	tmp_class = 1;
1863      else
1864	tmp_class = 2;
1865
1866      link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
1867      if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
1868	tmp2_class = 3;
1869      else if (REG_NOTE_KIND (link) == 0) /* Data dependence.  */
1870	tmp2_class = 1;
1871      else
1872	tmp2_class = 2;
1873
1874      if ((value = tmp_class - tmp2_class))
1875	return value;
1876    }
1877
1878  /* If insns are equally good, sort by INSN_LUID (original insn order),
1879     so that we make the sort stable.  This minimizes instruction movement,
1880     thus minimizing sched's effect on debugging and cross-jumping.  */
1881  return INSN_LUID (tmp) - INSN_LUID (tmp2);
1882}
1883
1884/* Resort the array A in which only element at index N may be out of order.  */
1885
1886__inline static void
1887swap_sort (a, n)
1888     rtx *a;
1889     int n;
1890{
1891  rtx insn = a[n-1];
1892  int i = n-2;
1893
1894  while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
1895    {
1896      a[i+1] = a[i];
1897      i -= 1;
1898    }
1899  a[i+1] = insn;
1900}
1901
1902static int max_priority;
1903
1904/* Add INSN to the insn queue so that it fires at least N_CYCLES
1905   before the currently executing insn.  */
1906
1907__inline static void
1908queue_insn (insn, n_cycles)
1909     rtx insn;
1910     int n_cycles;
1911{
1912  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1913  NEXT_INSN (insn) = insn_queue[next_q];
1914  insn_queue[next_q] = insn;
1915  q_size += 1;
1916}
1917
1918/* Return nonzero if PAT is the pattern of an insn which makes a
1919   register live.  */
1920
1921__inline static int
1922birthing_insn_p (pat)
1923     rtx pat;
1924{
1925  int j;
1926
1927  if (reload_completed == 1)
1928    return 0;
1929
1930  if (GET_CODE (pat) == SET
1931      && GET_CODE (SET_DEST (pat)) == REG)
1932    {
1933      rtx dest = SET_DEST (pat);
1934      int i = REGNO (dest);
1935
1936      /* It would be more accurate to use refers_to_regno_p or
1937	 reg_mentioned_p to determine when the dest is not live before this
1938	 insn.  */
1939
1940      if (REGNO_REG_SET_P (bb_live_regs, i))
1941	return (REG_N_SETS (i) == 1);
1942
1943      return 0;
1944    }
1945  if (GET_CODE (pat) == PARALLEL)
1946    {
1947      for (j = 0; j < XVECLEN (pat, 0); j++)
1948	if (birthing_insn_p (XVECEXP (pat, 0, j)))
1949	  return 1;
1950    }
1951  return 0;
1952}
1953
1954/* PREV is an insn that is ready to execute.  Adjust its priority if that
1955   will help shorten register lifetimes.  */
1956
1957__inline static void
1958adjust_priority (prev)
1959     rtx prev;
1960{
1961  /* Trying to shorten register lives after reload has completed
1962     is useless and wrong.  It gives inaccurate schedules.  */
1963  if (reload_completed == 0)
1964    {
1965      rtx note;
1966      int n_deaths = 0;
1967
1968      /* ??? This code has no effect, because REG_DEAD notes are removed
1969	 before we ever get here.  */
1970      for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
1971	if (REG_NOTE_KIND (note) == REG_DEAD)
1972	  n_deaths += 1;
1973
1974      /* Defer scheduling insns which kill registers, since that
1975	 shortens register lives.  Prefer scheduling insns which
1976	 make registers live for the same reason.  */
1977      switch (n_deaths)
1978	{
1979	default:
1980	  INSN_PRIORITY (prev) >>= 3;
1981	  break;
1982	case 3:
1983	  INSN_PRIORITY (prev) >>= 2;
1984	  break;
1985	case 2:
1986	case 1:
1987	  INSN_PRIORITY (prev) >>= 1;
1988	  break;
1989	case 0:
1990	  if (birthing_insn_p (PATTERN (prev)))
1991	    {
1992	      int max = max_priority;
1993
1994	      if (max > INSN_PRIORITY (prev))
1995		INSN_PRIORITY (prev) = max;
1996	    }
1997	  break;
1998	}
1999#ifdef ADJUST_PRIORITY
2000      ADJUST_PRIORITY (prev);
2001#endif
2002    }
2003}
2004
2005/* INSN is the "currently executing insn".  Launch each insn which was
2006   waiting on INSN (in the backwards dataflow sense).  READY is a
2007   vector of insns which are ready to fire.  N_READY is the number of
2008   elements in READY.  CLOCK is the current virtual cycle.  */
2009
2010static int
2011schedule_insn (insn, ready, n_ready, clock)
2012     rtx insn;
2013     rtx *ready;
2014     int n_ready;
2015     int clock;
2016{
2017  rtx link;
2018  int new_ready = n_ready;
2019
2020  if (MAX_BLOCKAGE > 1)
2021    schedule_unit (insn_unit (insn), insn, clock);
2022
2023  if (LOG_LINKS (insn) == 0)
2024    return n_ready;
2025
2026  /* This is used by the function adjust_priority above.  */
2027  if (n_ready > 0)
2028    max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2029  else
2030    max_priority = INSN_PRIORITY (insn);
2031
2032  for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2033    {
2034      rtx prev = XEXP (link, 0);
2035      int cost = insn_cost (prev, link, insn);
2036
2037      if ((INSN_REF_COUNT (prev) -= 1) != 0)
2038	{
2039	  /* We satisfied one requirement to fire PREV.  Record the earliest
2040	     time when PREV can fire.  No need to do this if the cost is 1,
2041	     because PREV can fire no sooner than the next cycle.  */
2042	  if (cost > 1)
2043	    INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2044	}
2045      else
2046	{
2047	  /* We satisfied the last requirement to fire PREV.  Ensure that all
2048	     timing requirements are satisfied.  */
2049	  if (INSN_TICK (prev) - clock > cost)
2050	    cost = INSN_TICK (prev) - clock;
2051
2052	  /* Adjust the priority of PREV and either put it on the ready
2053	     list or queue it.  */
2054	  adjust_priority (prev);
2055	  if (cost <= 1)
2056	    ready[new_ready++] = prev;
2057	  else
2058	    queue_insn (prev, cost);
2059	}
2060    }
2061
2062  return new_ready;
2063}
2064
2065/* Given N_READY insns in the ready list READY at time CLOCK, queue
2066   those that are blocked due to function unit hazards and rearrange
2067   the remaining ones to minimize subsequent function unit hazards.  */
2068
2069static int
2070schedule_select (ready, n_ready, clock, file)
2071     rtx *ready;
2072     int n_ready, clock;
2073     FILE *file;
2074{
2075  int pri = INSN_PRIORITY (ready[0]);
2076  int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2077  rtx insn;
2078
2079  /* Work down the ready list in groups of instructions with the same
2080     priority value.  Queue insns in the group that are blocked and
2081     select among those that remain for the one with the largest
2082     potential hazard.  */
2083  for (i = 0; i < n_ready; i = j)
2084    {
2085      int opri = pri;
2086      for (j = i + 1; j < n_ready; j++)
2087	if ((pri = INSN_PRIORITY (ready[j])) != opri)
2088	  break;
2089
2090      /* Queue insns in the group that are blocked.  */
2091      for (k = i, q = 0; k < j; k++)
2092	{
2093	  insn = ready[k];
2094	  if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2095	    {
2096	      q++;
2097	      ready[k] = 0;
2098	      queue_insn (insn, cost);
2099	      if (file)
2100		fprintf (file, "\n;; blocking insn %d for %d cycles",
2101			 INSN_UID (insn), cost);
2102	    }
2103	}
2104      new_ready -= q;
2105
2106      /* Check the next group if all insns were queued.  */
2107      if (j - i - q == 0)
2108	continue;
2109
2110      /* If more than one remains, select the first one with the largest
2111	 potential hazard.  */
2112      else if (j - i - q > 1)
2113	{
2114	  best_cost = -1;
2115	  for (k = i; k < j; k++)
2116	    {
2117	      if ((insn = ready[k]) == 0)
2118		continue;
2119	      if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2120		  > best_cost)
2121		{
2122		  best_cost = cost;
2123		  best_insn = k;
2124		}
2125	    }
2126	}
2127      /* We have found a suitable insn to schedule.  */
2128      break;
2129    }
2130
2131  /* Move the best insn to be front of the ready list.  */
2132  if (best_insn != 0)
2133    {
2134      if (file)
2135	{
2136	  fprintf (file, ", now");
2137	  for (i = 0; i < n_ready; i++)
2138	    if (ready[i])
2139	      fprintf (file, " %d", INSN_UID (ready[i]));
2140	  fprintf (file, "\n;; insn %d has a greater potential hazard",
2141		   INSN_UID (ready[best_insn]));
2142	}
2143      for (i = best_insn; i > 0; i--)
2144	{
2145	  insn = ready[i-1];
2146	  ready[i-1] = ready[i];
2147	  ready[i] = insn;
2148	}
2149    }
2150
2151  /* Compact the ready list.  */
2152  if (new_ready < n_ready)
2153    for (i = j = 0; i < n_ready; i++)
2154      if (ready[i])
2155	ready[j++] = ready[i];
2156
2157  return new_ready;
2158}
2159
2160/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2161   dead_notes list.  */
2162
2163static void
2164create_reg_dead_note (reg, insn)
2165     rtx reg, insn;
2166{
2167  rtx link;
2168
2169  /* The number of registers killed after scheduling must be the same as the
2170     number of registers killed before scheduling.  The number of REG_DEAD
2171     notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2172     might become one DImode hard register REG_DEAD note, but the number of
2173     registers killed will be conserved.
2174
2175     We carefully remove REG_DEAD notes from the dead_notes list, so that
2176     there will be none left at the end.  If we run out early, then there
2177     is a bug somewhere in flow, combine and/or sched.  */
2178
2179  if (dead_notes == 0)
2180    {
2181#if 1
2182      abort ();
2183#else
2184      link = rtx_alloc (EXPR_LIST);
2185      PUT_REG_NOTE_KIND (link, REG_DEAD);
2186#endif
2187    }
2188  else
2189    {
2190      /* Number of regs killed by REG.  */
2191      int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2192			 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2193      /* Number of regs killed by REG_DEAD notes taken off the list.  */
2194      int reg_note_regs;
2195
2196      link = dead_notes;
2197      reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2198		       : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2199					   GET_MODE (XEXP (link, 0))));
2200      while (reg_note_regs < regs_killed)
2201	{
2202	  /* LINK might be zero if we killed more registers after scheduling
2203	     than before, and the last hard register we kill is actually
2204	     multiple hard regs.  */
2205	  if (link == NULL_RTX)
2206	    abort ();
2207
2208	  link = XEXP (link, 1);
2209	  reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2210			    : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2211						GET_MODE (XEXP (link, 0))));
2212	}
2213      dead_notes = XEXP (link, 1);
2214
2215      /* If we took too many regs kills off, put the extra ones back.  */
2216      while (reg_note_regs > regs_killed)
2217	{
2218	  rtx temp_reg, temp_link;
2219
2220	  temp_reg = gen_rtx_REG (word_mode, 0);
2221	  temp_link = rtx_alloc (EXPR_LIST);
2222	  PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2223	  XEXP (temp_link, 0) = temp_reg;
2224	  XEXP (temp_link, 1) = dead_notes;
2225	  dead_notes = temp_link;
2226	  reg_note_regs--;
2227	}
2228    }
2229
2230  XEXP (link, 0) = reg;
2231  XEXP (link, 1) = REG_NOTES (insn);
2232  REG_NOTES (insn) = link;
2233}
2234
2235/* Subroutine on attach_deaths_insn--handles the recursive search
2236   through INSN.  If SET_P is true, then x is being modified by the insn.  */
2237
2238static void
2239attach_deaths (x, insn, set_p)
2240     rtx x;
2241     rtx insn;
2242     int set_p;
2243{
2244  register int i;
2245  register int j;
2246  register enum rtx_code code;
2247  register char *fmt;
2248
2249  if (x == 0)
2250    return;
2251
2252  code = GET_CODE (x);
2253
2254  switch (code)
2255    {
2256    case CONST_INT:
2257    case CONST_DOUBLE:
2258    case LABEL_REF:
2259    case SYMBOL_REF:
2260    case CONST:
2261    case CODE_LABEL:
2262    case PC:
2263    case CC0:
2264      /* Get rid of the easy cases first.  */
2265      return;
2266
2267    case REG:
2268      {
2269	/* If the register dies in this insn, queue that note, and mark
2270	   this register as needing to die.  */
2271	/* This code is very similar to mark_used_1 (if set_p is false)
2272	   and mark_set_1 (if set_p is true) in flow.c.  */
2273
2274	register int regno;
2275	int some_needed;
2276	int all_needed;
2277
2278	if (set_p)
2279	  return;
2280
2281	regno = REGNO (x);
2282	all_needed = some_needed = REGNO_REG_SET_P (old_live_regs, regno);
2283	if (regno < FIRST_PSEUDO_REGISTER)
2284	  {
2285	    int n;
2286
2287	    n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2288	    while (--n > 0)
2289	      {
2290		int needed = (REGNO_REG_SET_P (old_live_regs, regno + n));
2291		some_needed |= needed;
2292		all_needed &= needed;
2293	      }
2294	  }
2295
2296	/* If it wasn't live before we started, then add a REG_DEAD note.
2297	   We must check the previous lifetime info not the current info,
2298	   because we may have to execute this code several times, e.g.
2299	   once for a clobber (which doesn't add a note) and later
2300	   for a use (which does add a note).
2301
2302	   Always make the register live.  We must do this even if it was
2303	   live before, because this may be an insn which sets and uses
2304	   the same register, in which case the register has already been
2305	   killed, so we must make it live again.
2306
2307	   Global registers are always live, and should never have a REG_DEAD
2308	   note added for them, so none of the code below applies to them.  */
2309
2310	if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2311	  {
2312	    /* Never add REG_DEAD notes for STACK_POINTER_REGNUM
2313	       since it's always considered to be live.  Similarly
2314	       for FRAME_POINTER_REGNUM if a frame pointer is needed
2315	       and for ARG_POINTER_REGNUM if it is fixed.  */
2316	    if (! (regno == FRAME_POINTER_REGNUM
2317		   && (! reload_completed || frame_pointer_needed))
2318#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2319		&& ! (regno == HARD_FRAME_POINTER_REGNUM
2320		      && (! reload_completed || frame_pointer_needed))
2321#endif
2322#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2323		&& ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2324#endif
2325		&& regno != STACK_POINTER_REGNUM)
2326	      {
2327		if (! all_needed && ! dead_or_set_p (insn, x))
2328		  {
2329		    /* Check for the case where the register dying partially
2330		       overlaps the register set by this insn.  */
2331		    if (regno < FIRST_PSEUDO_REGISTER
2332			&& HARD_REGNO_NREGS (regno, GET_MODE (x)) > 1)
2333		      {
2334			int n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2335			while (--n >= 0)
2336			  some_needed |= dead_or_set_regno_p (insn, regno + n);
2337		      }
2338
2339		    /* If none of the words in X is needed, make a REG_DEAD
2340		       note.  Otherwise, we must make partial REG_DEAD
2341		       notes.  */
2342		    if (! some_needed)
2343		      create_reg_dead_note (x, insn);
2344		    else
2345		      {
2346			int i;
2347
2348			/* Don't make a REG_DEAD note for a part of a
2349			   register that is set in the insn.  */
2350			for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2351			     i >= 0; i--)
2352			  if (! REGNO_REG_SET_P (old_live_regs, regno + i)
2353			      && ! dead_or_set_regno_p (insn, regno + i))
2354			    create_reg_dead_note (gen_rtx_REG (reg_raw_mode[regno + i],
2355							       regno + i),
2356						  insn);
2357		      }
2358		  }
2359	      }
2360
2361	    if (regno < FIRST_PSEUDO_REGISTER)
2362	      {
2363		int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2364		while (--j >= 0)
2365		  {
2366		    CLEAR_REGNO_REG_SET (bb_dead_regs, regno + j);
2367		    SET_REGNO_REG_SET (bb_live_regs, regno + j);
2368		  }
2369	      }
2370	    else
2371	      {
2372		CLEAR_REGNO_REG_SET (bb_dead_regs, regno);
2373		SET_REGNO_REG_SET (bb_live_regs, regno);
2374	      }
2375	  }
2376	return;
2377      }
2378
2379    case MEM:
2380      /* Handle tail-recursive case.  */
2381      attach_deaths (XEXP (x, 0), insn, 0);
2382      return;
2383
2384    case SUBREG:
2385      attach_deaths (SUBREG_REG (x), insn,
2386		     set_p && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2387			       <= UNITS_PER_WORD)
2388			       || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
2389				   == GET_MODE_SIZE (GET_MODE ((x))))));
2390      return;
2391
2392    case STRICT_LOW_PART:
2393      attach_deaths (XEXP (x, 0), insn, 0);
2394      return;
2395
2396    case ZERO_EXTRACT:
2397    case SIGN_EXTRACT:
2398      attach_deaths (XEXP (x, 0), insn, 0);
2399      attach_deaths (XEXP (x, 1), insn, 0);
2400      attach_deaths (XEXP (x, 2), insn, 0);
2401      return;
2402
2403    default:
2404      /* Other cases: walk the insn.  */
2405      fmt = GET_RTX_FORMAT (code);
2406      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2407	{
2408	  if (fmt[i] == 'e')
2409	    attach_deaths (XEXP (x, i), insn, 0);
2410	  else if (fmt[i] == 'E')
2411	    for (j = 0; j < XVECLEN (x, i); j++)
2412	      attach_deaths (XVECEXP (x, i, j), insn, 0);
2413	}
2414    }
2415}
2416
2417/* After INSN has executed, add register death notes for each register
2418   that is dead after INSN.  */
2419
2420static void
2421attach_deaths_insn (insn)
2422     rtx insn;
2423{
2424  rtx x = PATTERN (insn);
2425  register RTX_CODE code = GET_CODE (x);
2426  rtx link;
2427
2428  if (code == SET)
2429    {
2430      attach_deaths (SET_SRC (x), insn, 0);
2431
2432      /* A register might die here even if it is the destination, e.g.
2433	 it is the target of a volatile read and is otherwise unused.
2434	 Hence we must always call attach_deaths for the SET_DEST.  */
2435      attach_deaths (SET_DEST (x), insn, 1);
2436    }
2437  else if (code == PARALLEL)
2438    {
2439      register int i;
2440      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2441	{
2442	  code = GET_CODE (XVECEXP (x, 0, i));
2443	  if (code == SET)
2444	    {
2445	      attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2446
2447	      attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2448	    }
2449	  /* Flow does not add REG_DEAD notes to registers that die in
2450	     clobbers, so we can't either.  */
2451	  else if (code != CLOBBER)
2452	    attach_deaths (XVECEXP (x, 0, i), insn, 0);
2453	}
2454    }
2455  /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
2456     MEM being clobbered, just like flow.  */
2457  else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
2458    attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
2459  /* Otherwise don't add a death note to things being clobbered.  */
2460  else if (code != CLOBBER)
2461    attach_deaths (x, insn, 0);
2462
2463  /* Make death notes for things used in the called function.  */
2464  if (GET_CODE (insn) == CALL_INSN)
2465    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2466      attach_deaths (XEXP (XEXP (link, 0), 0), insn,
2467		     GET_CODE (XEXP (link, 0)) == CLOBBER);
2468}
2469
2470/* Delete notes beginning with INSN and maybe put them in the chain
2471   of notes ended by NOTE_LIST.
2472   Returns the insn following the notes.  */
2473
2474static rtx
2475unlink_notes (insn, tail)
2476     rtx insn, tail;
2477{
2478  rtx prev = PREV_INSN (insn);
2479
2480  while (insn != tail && GET_CODE (insn) == NOTE)
2481    {
2482      rtx next = NEXT_INSN (insn);
2483      /* Delete the note from its current position.  */
2484      if (prev)
2485	NEXT_INSN (prev) = next;
2486      if (next)
2487	PREV_INSN (next) = prev;
2488
2489      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2490	/* Record line-number notes so they can be reused.  */
2491	LINE_NOTE (insn) = insn;
2492
2493      /* Don't save away NOTE_INSN_SETJMPs, because they must remain
2494	 immediately after the call they follow.  We use a fake
2495	 (REG_DEAD (const_int -1)) note to remember them.
2496	 Likewise with NOTE_INSN_{LOOP,EHREGION}_{BEG, END}.  */
2497      else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
2498	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
2499	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
2500	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
2501	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
2502	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
2503	       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
2504	{
2505	  /* Insert the note at the end of the notes list.  */
2506	  PREV_INSN (insn) = note_list;
2507	  if (note_list)
2508	    NEXT_INSN (note_list) = insn;
2509	  note_list = insn;
2510	}
2511
2512      insn = next;
2513    }
2514  return insn;
2515}
2516
2517/* Constructor for `sometimes' data structure.  */
2518
2519static int
2520new_sometimes_live (regs_sometimes_live, regno, sometimes_max)
2521     struct sometimes *regs_sometimes_live;
2522     int regno;
2523     int sometimes_max;
2524{
2525  register struct sometimes *p;
2526
2527  /* There should never be a register greater than max_regno here.  If there
2528     is, it means that a define_split has created a new pseudo reg.  This
2529     is not allowed, since there will not be flow info available for any
2530     new register, so catch the error here.  */
2531  if (regno >= max_regno)
2532    abort ();
2533
2534  p = &regs_sometimes_live[sometimes_max];
2535  p->regno = regno;
2536  p->live_length = 0;
2537  p->calls_crossed = 0;
2538  sometimes_max++;
2539  return sometimes_max;
2540}
2541
2542/* Count lengths of all regs we are currently tracking,
2543   and find new registers no longer live.  */
2544
2545static void
2546finish_sometimes_live (regs_sometimes_live, sometimes_max)
2547     struct sometimes *regs_sometimes_live;
2548     int sometimes_max;
2549{
2550  int i;
2551
2552  for (i = 0; i < sometimes_max; i++)
2553    {
2554      register struct sometimes *p = &regs_sometimes_live[i];
2555      int regno = p->regno;
2556
2557      sched_reg_live_length[regno] += p->live_length;
2558      sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2559    }
2560}
2561
2562/* Search INSN for fake REG_DEAD note pairs for NOTE_INSN_SETJMP,
2563   NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
2564   NOTEs.  The REG_DEAD note following first one is contains the saved
2565   value for NOTE_BLOCK_NUMBER which is useful for
2566   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  LAST is the last instruction
2567   output by the instruction scheduler.  Return the new value of LAST.  */
2568
2569static rtx
2570reemit_notes (insn, last)
2571     rtx insn;
2572     rtx last;
2573{
2574  rtx note;
2575
2576  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2577    {
2578      if (REG_NOTE_KIND (note) == REG_DEAD
2579	  && GET_CODE (XEXP (note, 0)) == CONST_INT)
2580	{
2581	  if (INTVAL (XEXP (note, 0)) == NOTE_INSN_SETJMP)
2582	    {
2583	      CONST_CALL_P (emit_note_after (INTVAL (XEXP (note, 0)), insn))
2584		= CONST_CALL_P (note);
2585	      remove_note (insn, note);
2586	      note = XEXP (note, 1);
2587	    }
2588	  else
2589	    {
2590	      last = emit_note_before (INTVAL (XEXP (note, 0)), last);
2591	      remove_note (insn, note);
2592	      note = XEXP (note, 1);
2593	      NOTE_BLOCK_NUMBER (last) = INTVAL (XEXP (note, 0));
2594	    }
2595	  remove_note (insn, note);
2596	}
2597    }
2598  return last;
2599}
2600
2601/* Use modified list scheduling to rearrange insns in basic block
2602   B.  FILE, if nonzero, is where we dump interesting output about
2603   this pass.  */
2604
2605static void
2606schedule_block (b, file)
2607     int b;
2608     FILE *file;
2609{
2610  rtx insn, last;
2611  rtx *ready, link;
2612  int i, j, n_ready = 0, new_ready, n_insns;
2613  int sched_n_insns = 0;
2614  int clock;
2615#define NEED_NOTHING	0
2616#define NEED_HEAD	1
2617#define NEED_TAIL	2
2618  int new_needs;
2619
2620  /* HEAD and TAIL delimit the region being scheduled.  */
2621  rtx head = BLOCK_HEAD (b);
2622  rtx tail = BLOCK_END (b);
2623  /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2624     being scheduled.  When the insns have been ordered,
2625     these insns delimit where the new insns are to be
2626     spliced back into the insn chain.  */
2627  rtx next_tail;
2628  rtx prev_head;
2629
2630  /* Keep life information accurate.  */
2631  register struct sometimes *regs_sometimes_live;
2632  int sometimes_max;
2633
2634  if (file)
2635    fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2636	     b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
2637
2638  i = max_reg_num ();
2639  reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2640  bzero ((char *) reg_last_uses, i * sizeof (rtx));
2641  reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2642  bzero ((char *) reg_last_sets, i * sizeof (rtx));
2643  reg_pending_sets = ALLOCA_REG_SET ();
2644  CLEAR_REG_SET (reg_pending_sets);
2645  reg_pending_sets_all = 0;
2646  clear_units ();
2647
2648#if 0
2649  /* We used to have code to avoid getting parameters moved from hard
2650     argument registers into pseudos.
2651
2652     However, it was removed when it proved to be of marginal benefit and
2653     caused problems because of different notions of what the "head" insn
2654     was.  */
2655
2656  /* Remove certain insns at the beginning from scheduling,
2657     by advancing HEAD.  */
2658
2659  /* At the start of a function, before reload has run, don't delay getting
2660     parameters from hard registers into pseudo registers.  */
2661  if (reload_completed == 0 && b == 0)
2662    {
2663      while (head != tail
2664	     && GET_CODE (head) == NOTE
2665	     && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2666	head = NEXT_INSN (head);
2667      while (head != tail
2668	     && GET_CODE (head) == INSN
2669	     && GET_CODE (PATTERN (head)) == SET)
2670	{
2671	  rtx src = SET_SRC (PATTERN (head));
2672	  while (GET_CODE (src) == SUBREG
2673		 || GET_CODE (src) == SIGN_EXTEND
2674		 || GET_CODE (src) == ZERO_EXTEND
2675		 || GET_CODE (src) == SIGN_EXTRACT
2676		 || GET_CODE (src) == ZERO_EXTRACT)
2677	    src = XEXP (src, 0);
2678	  if (GET_CODE (src) != REG
2679	      || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2680	    break;
2681	  /* Keep this insn from ever being scheduled.  */
2682	  INSN_REF_COUNT (head) = 1;
2683	  head = NEXT_INSN (head);
2684	}
2685    }
2686#endif
2687
2688  /* Don't include any notes or labels at the beginning of the
2689     basic block, or notes at the ends of basic blocks.  */
2690  while (head != tail)
2691    {
2692      if (GET_CODE (head) == NOTE)
2693	head = NEXT_INSN (head);
2694      else if (GET_CODE (tail) == NOTE)
2695	tail = PREV_INSN (tail);
2696      else if (GET_CODE (head) == CODE_LABEL)
2697	head = NEXT_INSN (head);
2698      else break;
2699    }
2700  /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2701     to schedule this block.  */
2702  if (head == tail
2703      && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2704    goto ret;
2705
2706#if 0
2707  /* This short-cut doesn't work.  It does not count call insns crossed by
2708     registers in reg_sometimes_live.  It does not mark these registers as
2709     dead if they die in this block.  It does not mark these registers live
2710     (or create new reg_sometimes_live entries if necessary) if they are born
2711     in this block.
2712
2713     The easy solution is to just always schedule a block.  This block only
2714     has one insn, so this won't slow down this pass by much.  */
2715
2716  if (head == tail)
2717    goto ret;
2718#endif
2719
2720  /* Now HEAD through TAIL are the insns actually to be rearranged;
2721     Let PREV_HEAD and NEXT_TAIL enclose them.  */
2722  prev_head = PREV_INSN (head);
2723  next_tail = NEXT_INSN (tail);
2724
2725  /* Initialize basic block data structures.  */
2726  dead_notes = 0;
2727  pending_read_insns = 0;
2728  pending_read_mems = 0;
2729  pending_write_insns = 0;
2730  pending_write_mems = 0;
2731  pending_lists_length = 0;
2732  last_pending_memory_flush = 0;
2733  last_function_call = 0;
2734  last_scheduled_insn = 0;
2735
2736  LOG_LINKS (sched_before_next_call) = 0;
2737
2738  n_insns = sched_analyze (head, tail);
2739  if (n_insns == 0)
2740    {
2741      free_pending_lists ();
2742      goto ret;
2743    }
2744
2745  /* Allocate vector to hold insns to be rearranged (except those
2746     insns which are controlled by an insn with SCHED_GROUP_P set).
2747     All these insns are included between ORIG_HEAD and ORIG_TAIL,
2748     as those variables ultimately are set up.  */
2749  ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
2750
2751  /* TAIL is now the last of the insns to be rearranged.
2752     Put those insns into the READY vector.  */
2753  insn = tail;
2754
2755  /* For all branches, calls, uses, and cc0 setters, force them to remain
2756     in order at the end of the block by adding dependencies and giving
2757     the last a high priority.  There may be notes present, and prev_head
2758     may also be a note.
2759
2760     Branches must obviously remain at the end.  Calls should remain at the
2761     end since moving them results in worse register allocation.  Uses remain
2762     at the end to ensure proper register allocation.  cc0 setters remaim
2763     at the end because they can't be moved away from their cc0 user.  */
2764  last = 0;
2765  while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
2766	 || (GET_CODE (insn) == INSN
2767	     && (GET_CODE (PATTERN (insn)) == USE
2768#ifdef HAVE_cc0
2769		 || sets_cc0_p (PATTERN (insn))
2770#endif
2771		 ))
2772	 || GET_CODE (insn) == NOTE)
2773    {
2774      if (GET_CODE (insn) != NOTE)
2775	{
2776	  priority (insn);
2777	  if (last == 0)
2778	    {
2779	      ready[n_ready++] = insn;
2780	      INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
2781	      INSN_REF_COUNT (insn) = 0;
2782	    }
2783	  else if (! find_insn_list (insn, LOG_LINKS (last)))
2784	    {
2785	      add_dependence (last, insn, REG_DEP_ANTI);
2786	      INSN_REF_COUNT (insn)++;
2787	    }
2788	  last = insn;
2789
2790	  /* Skip over insns that are part of a group.  */
2791	  while (SCHED_GROUP_P (insn))
2792	    {
2793	      insn = prev_nonnote_insn (insn);
2794	      priority (insn);
2795	    }
2796	}
2797
2798      insn = PREV_INSN (insn);
2799      /* Don't overrun the bounds of the basic block.  */
2800      if (insn == prev_head)
2801	break;
2802    }
2803
2804  /* Assign priorities to instructions.  Also check whether they
2805     are in priority order already.  If so then I will be nonnegative.
2806     We use this shortcut only before reloading.  */
2807#if 0
2808  i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
2809#endif
2810
2811  for (; insn != prev_head; insn = PREV_INSN (insn))
2812    {
2813      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2814	{
2815	  priority (insn);
2816	  if (INSN_REF_COUNT (insn) == 0)
2817	    {
2818	      if (last == 0)
2819		ready[n_ready++] = insn;
2820	      else
2821		{
2822		  /* Make this dependent on the last of the instructions
2823		     that must remain in order at the end of the block.  */
2824		  add_dependence (last, insn, REG_DEP_ANTI);
2825		  INSN_REF_COUNT (insn) = 1;
2826		}
2827	    }
2828	  if (SCHED_GROUP_P (insn))
2829	    {
2830	      while (SCHED_GROUP_P (insn))
2831		{
2832		  insn = prev_nonnote_insn (insn);
2833		  priority (insn);
2834		}
2835	      continue;
2836	    }
2837#if 0
2838	  if (i < 0)
2839	    continue;
2840	  if (INSN_PRIORITY (insn) < i)
2841	    i = INSN_PRIORITY (insn);
2842	  else if (INSN_PRIORITY (insn) > i)
2843	    i = DONE_PRIORITY;
2844#endif
2845	}
2846    }
2847
2848#if 0
2849  /* This short-cut doesn't work.  It does not count call insns crossed by
2850     registers in reg_sometimes_live.  It does not mark these registers as
2851     dead if they die in this block.  It does not mark these registers live
2852     (or create new reg_sometimes_live entries if necessary) if they are born
2853     in this block.
2854
2855     The easy solution is to just always schedule a block.  These blocks tend
2856     to be very short, so this doesn't slow down this pass by much.  */
2857
2858  /* If existing order is good, don't bother to reorder.  */
2859  if (i != DONE_PRIORITY)
2860    {
2861      if (file)
2862	fprintf (file, ";; already scheduled\n");
2863
2864      if (reload_completed == 0)
2865	{
2866	  for (i = 0; i < sometimes_max; i++)
2867	    regs_sometimes_live[i].live_length += n_insns;
2868
2869	  finish_sometimes_live (regs_sometimes_live, sometimes_max);
2870	}
2871      free_pending_lists ();
2872      goto ret;
2873    }
2874#endif
2875
2876  /* Scan all the insns to be scheduled, removing NOTE insns
2877     and register death notes.
2878     Line number NOTE insns end up in NOTE_LIST.
2879     Register death notes end up in DEAD_NOTES.
2880
2881     Recreate the register life information for the end of this basic
2882     block.  */
2883
2884  if (reload_completed == 0)
2885    {
2886      COPY_REG_SET (bb_live_regs, BASIC_BLOCK (b)->global_live_at_start);
2887      CLEAR_REG_SET (bb_dead_regs);
2888
2889      if (b == 0)
2890	{
2891	  /* This is the first block in the function.  There may be insns
2892	     before head that we can't schedule.   We still need to examine
2893	     them though for accurate register lifetime analysis.  */
2894
2895	  /* We don't want to remove any REG_DEAD notes as the code below
2896	     does.  */
2897
2898	  for (insn = BLOCK_HEAD (b); insn != head;
2899	       insn = NEXT_INSN (insn))
2900	    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2901	      {
2902		/* See if the register gets born here.  */
2903		/* We must check for registers being born before we check for
2904		   registers dying.  It is possible for a register to be born
2905		   and die in the same insn, e.g. reading from a volatile
2906		   memory location into an otherwise unused register.  Such
2907		   a register must be marked as dead after this insn.  */
2908		if (GET_CODE (PATTERN (insn)) == SET
2909		    || GET_CODE (PATTERN (insn)) == CLOBBER)
2910		  sched_note_set (PATTERN (insn), 0);
2911		else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2912		  {
2913		    int j;
2914		    for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2915		      if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
2916			  || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
2917			sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2918
2919		    /* ??? This code is obsolete and should be deleted.  It
2920		       is harmless though, so we will leave it in for now.  */
2921		    for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
2922		      if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
2923			sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
2924		  }
2925
2926		/* Each call clobbers (makes live) all call-clobbered regs
2927		   that are not global or fixed.  Note that the function-value
2928		   reg is a call_clobbered reg.  */
2929
2930		if (GET_CODE (insn) == CALL_INSN)
2931		  {
2932		    int j;
2933		    for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2934		      if (call_used_regs[j] && ! global_regs[j]
2935			  && ! fixed_regs[j])
2936			{
2937			  SET_REGNO_REG_SET (bb_live_regs, j);
2938			  CLEAR_REGNO_REG_SET (bb_dead_regs, j);
2939			}
2940		  }
2941
2942		for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2943		  {
2944		    if ((REG_NOTE_KIND (link) == REG_DEAD
2945			 || REG_NOTE_KIND (link) == REG_UNUSED)
2946			/* Verify that the REG_NOTE has a valid value.  */
2947			&& GET_CODE (XEXP (link, 0)) == REG)
2948		      {
2949			register int regno = REGNO (XEXP (link, 0));
2950
2951			if (regno < FIRST_PSEUDO_REGISTER)
2952			  {
2953			    int j = HARD_REGNO_NREGS (regno,
2954						      GET_MODE (XEXP (link, 0)));
2955			    while (--j >= 0)
2956			      {
2957				CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
2958				SET_REGNO_REG_SET (bb_dead_regs, regno + j);
2959			      }
2960			  }
2961			else
2962			  {
2963			    CLEAR_REGNO_REG_SET (bb_live_regs, regno);
2964			    SET_REGNO_REG_SET (bb_dead_regs, regno);
2965			  }
2966		      }
2967		  }
2968	      }
2969	}
2970    }
2971
2972  /* If debugging information is being produced, keep track of the line
2973     number notes for each insn.  */
2974  if (write_symbols != NO_DEBUG)
2975    {
2976      /* We must use the true line number for the first insn in the block
2977	 that was computed and saved at the start of this pass.  We can't
2978	 use the current line number, because scheduling of the previous
2979	 block may have changed the current line number.  */
2980      rtx line = line_note_head[b];
2981
2982      for (insn = BLOCK_HEAD (b);
2983	   insn != next_tail;
2984	   insn = NEXT_INSN (insn))
2985	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
2986	  line = insn;
2987	else
2988	  LINE_NOTE (insn) = line;
2989    }
2990
2991  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2992    {
2993      rtx prev, next, link;
2994
2995      /* Farm out notes.  This is needed to keep the debugger from
2996	 getting completely deranged.  */
2997      if (GET_CODE (insn) == NOTE)
2998	{
2999	  prev = insn;
3000	  insn = unlink_notes (insn, next_tail);
3001	  if (prev == tail)
3002	    abort ();
3003	  if (prev == head)
3004	    abort ();
3005	  if (insn == next_tail)
3006	    abort ();
3007	}
3008
3009      if (reload_completed == 0
3010	  && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3011	{
3012	  /* See if the register gets born here.  */
3013	  /* We must check for registers being born before we check for
3014	     registers dying.  It is possible for a register to be born and
3015	     die in the same insn, e.g. reading from a volatile memory
3016	     location into an otherwise unused register.  Such a register
3017	     must be marked as dead after this insn.  */
3018	  if (GET_CODE (PATTERN (insn)) == SET
3019	      || GET_CODE (PATTERN (insn)) == CLOBBER)
3020	    sched_note_set (PATTERN (insn), 0);
3021	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3022	    {
3023	      int j;
3024	      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3025		if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3026		    || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3027		  sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3028
3029	      /* ??? This code is obsolete and should be deleted.  It
3030		 is harmless though, so we will leave it in for now.  */
3031	      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3032		if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3033		  sched_note_set (XVECEXP (PATTERN (insn), 0, j), 0);
3034	    }
3035
3036	  /* Each call clobbers (makes live) all call-clobbered regs that are
3037	     not global or fixed.  Note that the function-value reg is a
3038	     call_clobbered reg.  */
3039
3040	  if (GET_CODE (insn) == CALL_INSN)
3041	    {
3042	      int j;
3043	      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
3044		if (call_used_regs[j] && ! global_regs[j]
3045		    && ! fixed_regs[j])
3046		  {
3047		    SET_REGNO_REG_SET (bb_live_regs, j);
3048		    CLEAR_REGNO_REG_SET (bb_dead_regs, j);
3049		  }
3050	    }
3051
3052	  /* Need to know what registers this insn kills.  */
3053	  for (prev = 0, link = REG_NOTES (insn); link; link = next)
3054	    {
3055	      next = XEXP (link, 1);
3056	      if ((REG_NOTE_KIND (link) == REG_DEAD
3057		   || REG_NOTE_KIND (link) == REG_UNUSED)
3058		  /* Verify that the REG_NOTE has a valid value.  */
3059		  && GET_CODE (XEXP (link, 0)) == REG)
3060		{
3061		  register int regno = REGNO (XEXP (link, 0));
3062
3063		  /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3064		     alone.  */
3065		  if (REG_NOTE_KIND (link) == REG_DEAD)
3066		    {
3067		      if (prev)
3068			XEXP (prev, 1) = next;
3069		      else
3070			REG_NOTES (insn) = next;
3071		      XEXP (link, 1) = dead_notes;
3072		      dead_notes = link;
3073		    }
3074		  else
3075		    prev = link;
3076
3077		  if (regno < FIRST_PSEUDO_REGISTER)
3078		    {
3079		      int j = HARD_REGNO_NREGS (regno,
3080						GET_MODE (XEXP (link, 0)));
3081		      while (--j >= 0)
3082			{
3083			  CLEAR_REGNO_REG_SET (bb_live_regs, regno + j);
3084			  SET_REGNO_REG_SET (bb_dead_regs, regno + j);
3085			}
3086		    }
3087		  else
3088		    {
3089		      CLEAR_REGNO_REG_SET (bb_live_regs, regno);
3090		      SET_REGNO_REG_SET (bb_dead_regs, regno);
3091		    }
3092		}
3093	      else
3094		prev = link;
3095	    }
3096	}
3097    }
3098
3099  if (reload_completed == 0)
3100    {
3101      /* Keep track of register lives.  */
3102      old_live_regs = ALLOCA_REG_SET ();
3103      regs_sometimes_live
3104	= (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3105      sometimes_max = 0;
3106
3107      /* Start with registers live at end.  */
3108      COPY_REG_SET (old_live_regs, bb_live_regs);
3109      EXECUTE_IF_SET_IN_REG_SET (bb_live_regs, 0, j,
3110				 {
3111				   sometimes_max
3112				     = new_sometimes_live (regs_sometimes_live,
3113							   j, sometimes_max);
3114				 });
3115    }
3116
3117  SCHED_SORT (ready, n_ready, 1);
3118
3119  if (file)
3120    {
3121      fprintf (file, ";; ready list initially:\n;; ");
3122      for (i = 0; i < n_ready; i++)
3123	fprintf (file, "%d ", INSN_UID (ready[i]));
3124      fprintf (file, "\n\n");
3125
3126      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3127	if (INSN_PRIORITY (insn) > 0)
3128	  fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3129		   INSN_UID (insn), INSN_PRIORITY (insn),
3130		   INSN_REF_COUNT (insn));
3131    }
3132
3133  /* Now HEAD and TAIL are going to become disconnected
3134     entirely from the insn chain.  */
3135  tail = 0;
3136
3137  /* Q_SIZE will always be zero here.  */
3138  q_ptr = 0; clock = 0;
3139  bzero ((char *) insn_queue, sizeof (insn_queue));
3140
3141  /* Now, perform list scheduling.  */
3142
3143  /* Where we start inserting insns is after TAIL.  */
3144  last = next_tail;
3145
3146  new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
3147	       ? NEED_HEAD : NEED_NOTHING);
3148  if (PREV_INSN (next_tail) == BLOCK_END (b))
3149    new_needs |= NEED_TAIL;
3150
3151  new_ready = n_ready;
3152  while (sched_n_insns < n_insns)
3153    {
3154      q_ptr = NEXT_Q (q_ptr); clock++;
3155
3156      /* Add all pending insns that can be scheduled without stalls to the
3157	 ready list.  */
3158      for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3159	{
3160	  if (file)
3161	    fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3162		     INSN_UID (insn), INSN_UID (last), clock);
3163	  ready[new_ready++] = insn;
3164	  q_size -= 1;
3165	}
3166      insn_queue[q_ptr] = 0;
3167
3168      /* If there are no ready insns, stall until one is ready and add all
3169	 of the pending insns at that point to the ready list.  */
3170      if (new_ready == 0)
3171	{
3172	  register int stalls;
3173
3174	  for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3175	    if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
3176	      {
3177		for (; insn; insn = NEXT_INSN (insn))
3178		  {
3179		    if (file)
3180		      fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3181			       INSN_UID (insn), INSN_UID (last), stalls, clock);
3182		    ready[new_ready++] = insn;
3183		    q_size -= 1;
3184		  }
3185		insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3186		break;
3187	      }
3188
3189	  q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3190	}
3191
3192      /* There should be some instructions waiting to fire.  */
3193      if (new_ready == 0)
3194	abort ();
3195
3196      if (file)
3197	{
3198	  fprintf (file, ";; ready list at T-%d:", clock);
3199	  for (i = 0; i < new_ready; i++)
3200	    fprintf (file, " %d (%x)",
3201		     INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3202	}
3203
3204      /* Sort the ready list and choose the best insn to schedule.  Select
3205	 which insn should issue in this cycle and queue those that are
3206	 blocked by function unit hazards.
3207
3208	 N_READY holds the number of items that were scheduled the last time,
3209	 minus the one instruction scheduled on the last loop iteration; it
3210	 is not modified for any other reason in this loop.  */
3211
3212      SCHED_SORT (ready, new_ready, n_ready);
3213      if (MAX_BLOCKAGE > 1)
3214	{
3215	  new_ready = schedule_select (ready, new_ready, clock, file);
3216	  if (new_ready == 0)
3217	    {
3218	      if (file)
3219		fprintf (file, "\n");
3220	      /* We must set n_ready here, to ensure that sorting always
3221		 occurs when we come back to the SCHED_SORT line above.  */
3222	      n_ready = 0;
3223	      continue;
3224	    }
3225	}
3226      n_ready = new_ready;
3227      last_scheduled_insn = insn = ready[0];
3228
3229      /* The first insn scheduled becomes the new tail.  */
3230      if (tail == 0)
3231	tail = insn;
3232
3233      if (file)
3234	{
3235	  fprintf (file, ", now");
3236	  for (i = 0; i < n_ready; i++)
3237	    fprintf (file, " %d", INSN_UID (ready[i]));
3238	  fprintf (file, "\n");
3239	}
3240
3241      if (DONE_PRIORITY_P (insn))
3242	abort ();
3243
3244      if (reload_completed == 0)
3245	{
3246	  /* Process this insn, and each insn linked to this one which must
3247	     be immediately output after this insn.  */
3248	  do
3249	    {
3250	      /* First we kill registers set by this insn, and then we
3251		 make registers used by this insn live.  This is the opposite
3252		 order used above because we are traversing the instructions
3253		 backwards.  */
3254
3255	      /* Strictly speaking, we should scan REG_UNUSED notes and make
3256		 every register mentioned there live, however, we will just
3257		 kill them again immediately below, so there doesn't seem to
3258		 be any reason why we bother to do this.  */
3259
3260	      /* See if this is the last notice we must take of a register.  */
3261	      if (GET_CODE (PATTERN (insn)) == SET
3262		  || GET_CODE (PATTERN (insn)) == CLOBBER)
3263		sched_note_set (PATTERN (insn), 1);
3264	      else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3265		{
3266		  int j;
3267		  for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3268		    if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3269			|| GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3270		      sched_note_set (XVECEXP (PATTERN (insn), 0, j), 1);
3271		}
3272
3273	      /* This code keeps life analysis information up to date.  */
3274	      if (GET_CODE (insn) == CALL_INSN)
3275		{
3276		  register struct sometimes *p;
3277
3278		  /* A call kills all call used registers that are not
3279		     global or fixed, except for those mentioned in the call
3280		     pattern which will be made live again later.  */
3281		  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3282		    if (call_used_regs[i] && ! global_regs[i]
3283			&& ! fixed_regs[i])
3284		      {
3285			CLEAR_REGNO_REG_SET (bb_live_regs, i);
3286			SET_REGNO_REG_SET (bb_dead_regs, i);
3287		      }
3288
3289		  /* Regs live at the time of a call instruction must not
3290		     go in a register clobbered by calls.  Record this for
3291		     all regs now live.  Note that insns which are born or
3292		     die in a call do not cross a call, so this must be done
3293		     after the killings (above) and before the births
3294		     (below).  */
3295		  p = regs_sometimes_live;
3296		  for (i = 0; i < sometimes_max; i++, p++)
3297		    if (REGNO_REG_SET_P (bb_live_regs, p->regno))
3298		      p->calls_crossed += 1;
3299		}
3300
3301	      /* Make every register used live, and add REG_DEAD notes for
3302		 registers which were not live before we started.  */
3303	      attach_deaths_insn (insn);
3304
3305	      /* Find registers now made live by that instruction.  */
3306	      EXECUTE_IF_AND_COMPL_IN_REG_SET (bb_live_regs, old_live_regs, 0, i,
3307					       {
3308						 sometimes_max
3309						   = new_sometimes_live (regs_sometimes_live,
3310									 i, sometimes_max);
3311					       });
3312	      IOR_REG_SET (old_live_regs, bb_live_regs);
3313
3314	      /* Count lengths of all regs we are worrying about now,
3315		 and handle registers no longer live.  */
3316
3317	      for (i = 0; i < sometimes_max; i++)
3318		{
3319		  register struct sometimes *p = &regs_sometimes_live[i];
3320		  int regno = p->regno;
3321
3322		  p->live_length += 1;
3323
3324		  if (!REGNO_REG_SET_P (bb_live_regs, p->regno))
3325		    {
3326		      /* This is the end of one of this register's lifetime
3327			 segments.  Save the lifetime info collected so far,
3328			 and clear its bit in the old_live_regs entry.  */
3329		      sched_reg_live_length[regno] += p->live_length;
3330		      sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3331		      CLEAR_REGNO_REG_SET (old_live_regs, p->regno);
3332
3333		      /* Delete the reg_sometimes_live entry for this reg by
3334			 copying the last entry over top of it.  */
3335		      *p = regs_sometimes_live[--sometimes_max];
3336		      /* ...and decrement i so that this newly copied entry
3337			 will be processed.  */
3338		      i--;
3339		    }
3340		}
3341
3342	      link = insn;
3343	      insn = PREV_INSN (insn);
3344	    }
3345	  while (SCHED_GROUP_P (link));
3346
3347	  /* Set INSN back to the insn we are scheduling now.  */
3348	  insn = ready[0];
3349	}
3350
3351      /* Schedule INSN.  Remove it from the ready list.  */
3352      ready += 1;
3353      n_ready -= 1;
3354
3355      sched_n_insns += 1;
3356      NEXT_INSN (insn) = last;
3357      PREV_INSN (last) = insn;
3358
3359      /* Everything that precedes INSN now either becomes "ready", if
3360	 it can execute immediately before INSN, or "pending", if
3361	 there must be a delay.  Give INSN high enough priority that
3362	 at least one (maybe more) reg-killing insns can be launched
3363	 ahead of all others.  Mark INSN as scheduled by changing its
3364	 priority to -1.  */
3365      INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3366      new_ready = schedule_insn (insn, ready, n_ready, clock);
3367      INSN_PRIORITY (insn) = DONE_PRIORITY;
3368
3369      /* Schedule all prior insns that must not be moved.  */
3370      if (SCHED_GROUP_P (insn))
3371	{
3372	  /* Disable these insns from being launched, in case one of the
3373	     insns in the group has a dependency on an earlier one.  */
3374	  link = insn;
3375	  while (SCHED_GROUP_P (link))
3376	    {
3377	      /* Disable these insns from being launched by anybody.  */
3378	      link = PREV_INSN (link);
3379	      INSN_REF_COUNT (link) = 0;
3380	    }
3381
3382	  /* Now handle each group insn like the main insn was handled
3383	     above.  */
3384	  link = insn;
3385	  while (SCHED_GROUP_P (link))
3386	    {
3387	      link = PREV_INSN (link);
3388
3389	      sched_n_insns += 1;
3390
3391	      /* ??? Why don't we set LAUNCH_PRIORITY here?  */
3392	      new_ready = schedule_insn (link, ready, new_ready, clock);
3393	      INSN_PRIORITY (link) = DONE_PRIORITY;
3394	    }
3395	}
3396
3397      /* Put back NOTE_INSN_SETJMP,
3398         NOTE_INSN_{LOOP,EHREGION}_{BEGIN,END} notes.  */
3399
3400      /* To prime the loop.  We need to handle INSN and all the insns in the
3401         sched group.  */
3402      last = NEXT_INSN (insn);
3403      do
3404	{
3405	  insn = PREV_INSN (last);
3406
3407	  /* Maintain a valid chain so emit_note_before works.
3408	     This is necessary because PREV_INSN (insn) isn't valid
3409	     (if ! SCHED_GROUP_P) and if it points to an insn already
3410	     scheduled, a circularity will result.  */
3411	  if (! SCHED_GROUP_P (insn))
3412	    {
3413	      NEXT_INSN (prev_head) = insn;
3414	      PREV_INSN (insn) = prev_head;
3415	    }
3416
3417	  last = reemit_notes (insn, insn);
3418	}
3419      while (SCHED_GROUP_P (insn));
3420    }
3421  if (q_size != 0)
3422    abort ();
3423
3424  if (reload_completed == 0)
3425    finish_sometimes_live (regs_sometimes_live, sometimes_max);
3426
3427  /* HEAD is now the first insn in the chain of insns that
3428     been scheduled by the loop above.
3429     TAIL is the last of those insns.  */
3430  head = last;
3431
3432  /* NOTE_LIST is the end of a chain of notes previously found
3433     among the insns.  Insert them at the beginning of the insns.  */
3434  if (note_list != 0)
3435    {
3436      rtx note_head = note_list;
3437      while (PREV_INSN (note_head))
3438	note_head = PREV_INSN (note_head);
3439
3440      PREV_INSN (head) = note_list;
3441      NEXT_INSN (note_list) = head;
3442      head = note_head;
3443    }
3444
3445  /* There should be no REG_DEAD notes leftover at the end.
3446     In practice, this can occur as the result of bugs in flow, combine.c,
3447     and/or sched.c.  The values of the REG_DEAD notes remaining are
3448     meaningless, because dead_notes is just used as a free list.  */
3449#if 1
3450  if (dead_notes != 0)
3451    abort ();
3452#endif
3453
3454  if (new_needs & NEED_HEAD)
3455    BLOCK_HEAD (b) = head;
3456  PREV_INSN (head) = prev_head;
3457  NEXT_INSN (prev_head) = head;
3458
3459  if (new_needs & NEED_TAIL)
3460    BLOCK_END (b) = tail;
3461  NEXT_INSN (tail) = next_tail;
3462  PREV_INSN (next_tail) = tail;
3463
3464  /* Restore the line-number notes of each insn.  */
3465  if (write_symbols != NO_DEBUG)
3466    {
3467      rtx line, note, prev, new;
3468      int notes = 0;
3469
3470      head = BLOCK_HEAD (b);
3471      next_tail = NEXT_INSN (BLOCK_END (b));
3472
3473      /* Determine the current line-number.  We want to know the current
3474	 line number of the first insn of the block here, in case it is
3475	 different from the true line number that was saved earlier.  If
3476	 different, then we need a line number note before the first insn
3477	 of this block.  If it happens to be the same, then we don't want to
3478	 emit another line number note here.  */
3479      for (line = head; line; line = PREV_INSN (line))
3480	if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3481	  break;
3482
3483      /* Walk the insns keeping track of the current line-number and inserting
3484	 the line-number notes as needed.  */
3485      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3486	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3487	  line = insn;
3488      /* This used to emit line number notes before every non-deleted note.
3489	 However, this confuses a debugger, because line notes not separated
3490	 by real instructions all end up at the same address.  I can find no
3491	 use for line number notes before other notes, so none are emitted.  */
3492	else if (GET_CODE (insn) != NOTE
3493		 && (note = LINE_NOTE (insn)) != 0
3494		 && note != line
3495		 && (line == 0
3496		     || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3497		     || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3498	  {
3499	    line = note;
3500	    prev = PREV_INSN (insn);
3501	    if (LINE_NOTE (note))
3502	      {
3503		/* Re-use the original line-number note.  */
3504		LINE_NOTE (note) = 0;
3505		PREV_INSN (note) = prev;
3506		NEXT_INSN (prev) = note;
3507		PREV_INSN (insn) = note;
3508		NEXT_INSN (note) = insn;
3509	      }
3510	    else
3511	      {
3512		notes++;
3513		new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3514		NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3515		RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
3516	      }
3517	  }
3518      if (file && notes)
3519	fprintf (file, ";; added %d line-number notes\n", notes);
3520    }
3521
3522  if (file)
3523    {
3524      fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3525	       clock, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)));
3526    }
3527
3528  /* Yow! We're done!  */
3529  free_pending_lists ();
3530
3531ret:
3532  FREE_REG_SET (reg_pending_sets);
3533  FREE_REG_SET (old_live_regs);
3534
3535  return;
3536}
3537
3538/* Subroutine of update_flow_info.  Determines whether any new REG_NOTEs are
3539   needed for the hard register mentioned in the note.  This can happen
3540   if the reference to the hard register in the original insn was split into
3541   several smaller hard register references in the split insns.  */
3542
3543static void
3544split_hard_reg_notes (note, first, last)
3545  rtx note, first, last;
3546{
3547  rtx reg, temp, link;
3548  int n_regs, i, new_reg;
3549  rtx insn;
3550
3551  /* Assume that this is a REG_DEAD note.  */
3552  if (REG_NOTE_KIND (note) != REG_DEAD)
3553    abort ();
3554
3555  reg = XEXP (note, 0);
3556
3557  n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3558
3559  for (i = 0; i < n_regs; i++)
3560    {
3561      new_reg = REGNO (reg) + i;
3562
3563      /* Check for references to new_reg in the split insns.  */
3564      for (insn = last; ; insn = PREV_INSN (insn))
3565	{
3566	  if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3567	      && (temp = regno_use_in (new_reg, PATTERN (insn))))
3568	    {
3569	      /* Create a new reg dead note here.  */
3570	      link = rtx_alloc (EXPR_LIST);
3571	      PUT_REG_NOTE_KIND (link, REG_DEAD);
3572	      XEXP (link, 0) = temp;
3573	      XEXP (link, 1) = REG_NOTES (insn);
3574	      REG_NOTES (insn) = link;
3575
3576	      /* If killed multiple registers here, then add in the excess.  */
3577	      i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3578
3579	      break;
3580	    }
3581	  /* It isn't mentioned anywhere, so no new reg note is needed for
3582	     this register.  */
3583	  if (insn == first)
3584	    break;
3585	}
3586    }
3587}
3588
3589/* Subroutine of update_flow_info.  Determines whether a SET or CLOBBER in an
3590   insn created by splitting needs a REG_DEAD or REG_UNUSED note added.  */
3591
3592static void
3593new_insn_dead_notes (pat, insn, last, orig_insn)
3594     rtx pat, insn, last, orig_insn;
3595{
3596  rtx dest, tem, set;
3597
3598  /* PAT is either a CLOBBER or a SET here.  */
3599  dest = XEXP (pat, 0);
3600
3601  while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3602	 || GET_CODE (dest) == STRICT_LOW_PART
3603	 || GET_CODE (dest) == SIGN_EXTRACT)
3604    dest = XEXP (dest, 0);
3605
3606  if (GET_CODE (dest) == REG)
3607    {
3608      /* If the original insn already used this register, we may not add new
3609         notes for it.  One example for a split that needs this test is
3610	 when a multi-word memory access with register-indirect addressing
3611	 is split into multiple memory accesses with auto-increment and
3612	 one adjusting add instruction for the address register.  */
3613      if (reg_referenced_p (dest, PATTERN (orig_insn)))
3614	return;
3615      for (tem = last; tem != insn; tem = PREV_INSN (tem))
3616	{
3617	  if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3618	      && reg_overlap_mentioned_p (dest, PATTERN (tem))
3619	      && (set = single_set (tem)))
3620	    {
3621	      rtx tem_dest = SET_DEST (set);
3622
3623	      while (GET_CODE (tem_dest) == ZERO_EXTRACT
3624		     || GET_CODE (tem_dest) == SUBREG
3625		     || GET_CODE (tem_dest) == STRICT_LOW_PART
3626		     || GET_CODE (tem_dest) == SIGN_EXTRACT)
3627		tem_dest = XEXP (tem_dest, 0);
3628
3629	      if (! rtx_equal_p (tem_dest, dest))
3630		{
3631		  /* Use the same scheme as combine.c, don't put both REG_DEAD
3632		     and REG_UNUSED notes on the same insn.  */
3633		  if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3634		      && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3635		    {
3636		      rtx note = rtx_alloc (EXPR_LIST);
3637		      PUT_REG_NOTE_KIND (note, REG_DEAD);
3638		      XEXP (note, 0) = dest;
3639		      XEXP (note, 1) = REG_NOTES (tem);
3640		      REG_NOTES (tem) = note;
3641		    }
3642		  /* The reg only dies in one insn, the last one that uses
3643		     it.  */
3644		  break;
3645		}
3646	      else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3647		/* We found an instruction that both uses the register,
3648		   and sets it, so no new REG_NOTE is needed for this set.  */
3649		break;
3650	    }
3651	}
3652      /* If this is a set, it must die somewhere, unless it is the dest of
3653	 the original insn, and hence is live after the original insn.  Abort
3654	 if it isn't supposed to be live after the original insn.
3655
3656	 If this is a clobber, then just add a REG_UNUSED note.  */
3657      if (tem == insn)
3658	{
3659	  int live_after_orig_insn = 0;
3660	  rtx pattern = PATTERN (orig_insn);
3661	  int i;
3662
3663	  if (GET_CODE (pat) == CLOBBER)
3664	    {
3665	      rtx note = rtx_alloc (EXPR_LIST);
3666	      PUT_REG_NOTE_KIND (note, REG_UNUSED);
3667	      XEXP (note, 0) = dest;
3668	      XEXP (note, 1) = REG_NOTES (insn);
3669	      REG_NOTES (insn) = note;
3670	      return;
3671	    }
3672
3673	  /* The original insn could have multiple sets, so search the
3674	     insn for all sets.  */
3675	  if (GET_CODE (pattern) == SET)
3676	    {
3677	      if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3678		live_after_orig_insn = 1;
3679	    }
3680	  else if (GET_CODE (pattern) == PARALLEL)
3681	    {
3682	      for (i = 0; i < XVECLEN (pattern, 0); i++)
3683		if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3684		    && reg_overlap_mentioned_p (dest,
3685						SET_DEST (XVECEXP (pattern,
3686								   0, i))))
3687		  live_after_orig_insn = 1;
3688	    }
3689
3690	  if (! live_after_orig_insn)
3691	    abort ();
3692	}
3693    }
3694}
3695
3696/* Subroutine of update_flow_info.  Update the value of reg_n_sets for all
3697   registers modified by X.  INC is -1 if the containing insn is being deleted,
3698   and is 1 if the containing insn is a newly generated insn.  */
3699
3700static void
3701update_n_sets (x, inc)
3702     rtx x;
3703     int inc;
3704{
3705  rtx dest = SET_DEST (x);
3706
3707  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3708	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3709    dest = SUBREG_REG (dest);
3710
3711  if (GET_CODE (dest) == REG)
3712    {
3713      int regno = REGNO (dest);
3714
3715      if (regno < FIRST_PSEUDO_REGISTER)
3716	{
3717	  register int i;
3718	  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
3719
3720	  for (i = regno; i < endregno; i++)
3721	    REG_N_SETS (i) += inc;
3722	}
3723      else
3724	REG_N_SETS (regno) += inc;
3725    }
3726}
3727
3728/* Updates all flow-analysis related quantities (including REG_NOTES) for
3729   the insns from FIRST to LAST inclusive that were created by splitting
3730   ORIG_INSN.  NOTES are the original REG_NOTES.  */
3731
3732void
3733update_flow_info (notes, first, last, orig_insn)
3734     rtx notes;
3735     rtx first, last;
3736     rtx orig_insn;
3737{
3738  rtx insn, note;
3739  rtx next;
3740  rtx orig_dest, temp;
3741  rtx set;
3742
3743  /* Get and save the destination set by the original insn.  */
3744
3745  orig_dest = single_set (orig_insn);
3746  if (orig_dest)
3747    orig_dest = SET_DEST (orig_dest);
3748
3749  /* Move REG_NOTES from the original insn to where they now belong.  */
3750
3751  for (note = notes; note; note = next)
3752    {
3753      next = XEXP (note, 1);
3754      switch (REG_NOTE_KIND (note))
3755	{
3756	case REG_DEAD:
3757	case REG_UNUSED:
3758	  /* Move these notes from the original insn to the last new insn where
3759	     the register is now set.  */
3760
3761	  for (insn = last; ; insn = PREV_INSN (insn))
3762	    {
3763	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3764		  && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3765		{
3766		  /* If this note refers to a multiple word hard register, it
3767		     may have been split into several smaller hard register
3768		     references, so handle it specially.  */
3769		  temp = XEXP (note, 0);
3770		  if (REG_NOTE_KIND (note) == REG_DEAD
3771		      && GET_CODE (temp) == REG
3772		      && REGNO (temp) < FIRST_PSEUDO_REGISTER
3773		      && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
3774		    split_hard_reg_notes (note, first, last);
3775		  else
3776		    {
3777		      XEXP (note, 1) = REG_NOTES (insn);
3778		      REG_NOTES (insn) = note;
3779		    }
3780
3781		  /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
3782		     notes.  */
3783		  /* ??? This won't handle multiple word registers correctly,
3784		     but should be good enough for now.  */
3785		  if (REG_NOTE_KIND (note) == REG_UNUSED
3786		      && GET_CODE (XEXP (note, 0)) != SCRATCH
3787		      && ! dead_or_set_p (insn, XEXP (note, 0)))
3788		    PUT_REG_NOTE_KIND (note, REG_DEAD);
3789
3790		  /* The reg only dies in one insn, the last one that uses
3791		     it.  */
3792		  break;
3793		}
3794	      /* It must die somewhere, fail it we couldn't find where it died.
3795
3796		 If this is a REG_UNUSED note, then it must be a temporary
3797		 register that was not needed by this instantiation of the
3798		 pattern, so we can safely ignore it.  */
3799	      if (insn == first)
3800		{
3801		  if (REG_NOTE_KIND (note) != REG_UNUSED)
3802		    abort ();
3803
3804		  break;
3805		}
3806	    }
3807	  break;
3808
3809	case REG_WAS_0:
3810	  /* If the insn that set the register to 0 was deleted, this
3811	     note cannot be relied on any longer.  The destination might
3812	     even have been moved to memory.
3813             This was observed for SH4 with execute/920501-6.c compilation,
3814	     -O2 -fomit-frame-pointer -finline-functions .  */
3815	  if (GET_CODE (XEXP (note, 0)) == NOTE
3816	      || INSN_DELETED_P (XEXP (note, 0)))
3817	    break;
3818	  /* This note applies to the dest of the original insn.  Find the
3819	     first new insn that now has the same dest, and move the note
3820	     there.  */
3821
3822	  if (! orig_dest)
3823	    abort ();
3824
3825	  for (insn = first; ; insn = NEXT_INSN (insn))
3826	    {
3827	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3828		  && (temp = single_set (insn))
3829		  && rtx_equal_p (SET_DEST (temp), orig_dest))
3830		{
3831		  XEXP (note, 1) = REG_NOTES (insn);
3832		  REG_NOTES (insn) = note;
3833		  /* The reg is only zero before one insn, the first that
3834		     uses it.  */
3835		  break;
3836		}
3837	      /* If this note refers to a multiple word hard
3838		 register, it may have been split into several smaller
3839		 hard register references.  We could split the notes,
3840		 but simply dropping them is good enough.  */
3841	      if (GET_CODE (orig_dest) == REG
3842		  && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3843		  && HARD_REGNO_NREGS (REGNO (orig_dest),
3844				       GET_MODE (orig_dest)) > 1)
3845		    break;
3846	      /* It must be set somewhere, fail if we couldn't find where it
3847		 was set.  */
3848	      if (insn == last)
3849		abort ();
3850	    }
3851	  break;
3852
3853	case REG_EQUAL:
3854	case REG_EQUIV:
3855	  /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
3856	     set is meaningless.  Just drop the note.  */
3857	  if (! orig_dest)
3858	    break;
3859
3860	case REG_NO_CONFLICT:
3861	  /* These notes apply to the dest of the original insn.  Find the last
3862	     new insn that now has the same dest, and move the note there.  */
3863
3864	  if (! orig_dest)
3865	    abort ();
3866
3867	  for (insn = last; ; insn = PREV_INSN (insn))
3868	    {
3869	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3870		  && (temp = single_set (insn))
3871		  && rtx_equal_p (SET_DEST (temp), orig_dest))
3872		{
3873		  XEXP (note, 1) = REG_NOTES (insn);
3874		  REG_NOTES (insn) = note;
3875		  /* Only put this note on one of the new insns.  */
3876		  break;
3877		}
3878
3879	      /* The original dest must still be set someplace.  Abort if we
3880		 couldn't find it.  */
3881	      if (insn == first)
3882		{
3883		  /* However, if this note refers to a multiple word hard
3884		     register, it may have been split into several smaller
3885		     hard register references.  We could split the notes,
3886		     but simply dropping them is good enough.  */
3887		  if (GET_CODE (orig_dest) == REG
3888		      && REGNO (orig_dest) < FIRST_PSEUDO_REGISTER
3889		      && HARD_REGNO_NREGS (REGNO (orig_dest),
3890					   GET_MODE (orig_dest)) > 1)
3891		    break;
3892		  /* Likewise for multi-word memory references.  */
3893		  if (GET_CODE (orig_dest) == MEM
3894		      && SIZE_FOR_MODE (orig_dest) > MOVE_MAX)
3895		    break;
3896		  abort ();
3897		}
3898	    }
3899	  break;
3900
3901	case REG_LIBCALL:
3902	  /* Move a REG_LIBCALL note to the first insn created, and update
3903	     the corresponding REG_RETVAL note.  */
3904	  XEXP (note, 1) = REG_NOTES (first);
3905	  REG_NOTES (first) = note;
3906
3907	  insn = XEXP (note, 0);
3908	  note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3909	  if (note)
3910	    XEXP (note, 0) = first;
3911	  break;
3912
3913	case REG_EXEC_COUNT:
3914	  /* Move a REG_EXEC_COUNT note to the first insn created.  */
3915	  XEXP (note, 1) = REG_NOTES (first);
3916	  REG_NOTES (first) = note;
3917	  break;
3918
3919	case REG_RETVAL:
3920	  /* Move a REG_RETVAL note to the last insn created, and update
3921	     the corresponding REG_LIBCALL note.  */
3922	  XEXP (note, 1) = REG_NOTES (last);
3923	  REG_NOTES (last) = note;
3924
3925	  insn = XEXP (note, 0);
3926	  note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3927	  if (note)
3928	    XEXP (note, 0) = last;
3929	  break;
3930
3931	case REG_NONNEG:
3932	case REG_BR_PROB:
3933	  /* This should be moved to whichever instruction is a JUMP_INSN.  */
3934
3935	  for (insn = last; ; insn = PREV_INSN (insn))
3936	    {
3937	      if (GET_CODE (insn) == JUMP_INSN)
3938		{
3939		  XEXP (note, 1) = REG_NOTES (insn);
3940		  REG_NOTES (insn) = note;
3941		  /* Only put this note on one of the new insns.  */
3942		  break;
3943		}
3944	      /* Fail if we couldn't find a JUMP_INSN.  */
3945	      if (insn == first)
3946		abort ();
3947	    }
3948	  break;
3949
3950	case REG_INC:
3951	  /* reload sometimes leaves obsolete REG_INC notes around.  */
3952	  if (reload_completed)
3953	    break;
3954	  /* This should be moved to whichever instruction now has the
3955	     increment operation.  */
3956	  abort ();
3957
3958	case REG_LABEL:
3959	  /* Should be moved to the new insn(s) which use the label.  */
3960	  for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
3961	    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3962		&& reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
3963	      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL,
3964						    XEXP (note, 0),
3965						    REG_NOTES (insn));
3966	  break;
3967
3968	case REG_CC_SETTER:
3969	case REG_CC_USER:
3970	  /* These two notes will never appear until after reorg, so we don't
3971	     have to handle them here.  */
3972	default:
3973	  abort ();
3974	}
3975    }
3976
3977  /* Each new insn created, except the last, has a new set.  If the destination
3978     is a register, then this reg is now live across several insns, whereas
3979     previously the dest reg was born and died within the same insn.  To
3980     reflect this, we now need a REG_DEAD note on the insn where this
3981     dest reg dies.
3982
3983     Similarly, the new insns may have clobbers that need REG_UNUSED notes.  */
3984
3985  for (insn = first; insn != last; insn = NEXT_INSN (insn))
3986    {
3987      rtx pat;
3988      int i;
3989
3990      pat = PATTERN (insn);
3991      if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
3992	new_insn_dead_notes (pat, insn, last, orig_insn);
3993      else if (GET_CODE (pat) == PARALLEL)
3994	{
3995	  for (i = 0; i < XVECLEN (pat, 0); i++)
3996	    if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3997		|| GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
3998	      new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
3999	}
4000    }
4001
4002  /* If any insn, except the last, uses the register set by the last insn,
4003     then we need a new REG_DEAD note on that insn.  In this case, there
4004     would not have been a REG_DEAD note for this register in the original
4005     insn because it was used and set within one insn.  */
4006
4007  set = single_set (last);
4008  if (set)
4009    {
4010      rtx dest = SET_DEST (set);
4011
4012      while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4013	     || GET_CODE (dest) == STRICT_LOW_PART
4014	     || GET_CODE (dest) == SIGN_EXTRACT)
4015	dest = XEXP (dest, 0);
4016
4017      if (GET_CODE (dest) == REG
4018	  /* Global registers are always live, so the code below does not
4019	     apply to them.  */
4020	  && (REGNO (dest) >= FIRST_PSEUDO_REGISTER
4021	      || ! global_regs[REGNO (dest)]))
4022	{
4023	  rtx stop_insn = PREV_INSN (first);
4024
4025	  /* If the last insn uses the register that it is setting, then
4026	     we don't want to put a REG_DEAD note there.  Search backwards
4027	     to find the first insn that sets but does not use DEST.  */
4028
4029	  insn = last;
4030	  if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
4031	    {
4032	      for (insn = PREV_INSN (insn); insn != first;
4033		   insn = PREV_INSN (insn))
4034		{
4035		  if ((set = single_set (insn))
4036		      && reg_mentioned_p (dest, SET_DEST (set))
4037		      && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4038		    break;
4039		}
4040	    }
4041
4042	  /* Now find the first insn that uses but does not set DEST.  */
4043
4044	  for (insn = PREV_INSN (insn); insn != stop_insn;
4045	       insn = PREV_INSN (insn))
4046	    {
4047	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4048		  && reg_mentioned_p (dest, PATTERN (insn))
4049		  && (set = single_set (insn)))
4050		{
4051		  rtx insn_dest = SET_DEST (set);
4052
4053		  while (GET_CODE (insn_dest) == ZERO_EXTRACT
4054			 || GET_CODE (insn_dest) == SUBREG
4055			 || GET_CODE (insn_dest) == STRICT_LOW_PART
4056			 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4057		    insn_dest = XEXP (insn_dest, 0);
4058
4059		  if (insn_dest != dest)
4060		    {
4061		      note = rtx_alloc (EXPR_LIST);
4062		      PUT_REG_NOTE_KIND (note, REG_DEAD);
4063		      XEXP (note, 0) = dest;
4064		      XEXP (note, 1) = REG_NOTES (insn);
4065		      REG_NOTES (insn) = note;
4066		      /* The reg only dies in one insn, the last one
4067			 that uses it.  */
4068		      break;
4069		    }
4070		}
4071	    }
4072	}
4073    }
4074
4075  /* If the original dest is modifying a multiple register target, and the
4076     original instruction was split such that the original dest is now set
4077     by two or more SUBREG sets, then the split insns no longer kill the
4078     destination of the original insn.
4079
4080     In this case, if there exists an instruction in the same basic block,
4081     before the split insn, which uses the original dest, and this use is
4082     killed by the original insn, then we must remove the REG_DEAD note on
4083     this insn, because it is now superfluous.
4084
4085     This does not apply when a hard register gets split, because the code
4086     knows how to handle overlapping hard registers properly.  */
4087  if (orig_dest && GET_CODE (orig_dest) == REG)
4088    {
4089      int found_orig_dest = 0;
4090      int found_split_dest = 0;
4091
4092      for (insn = first; ; insn = NEXT_INSN (insn))
4093	{
4094	  rtx pat = PATTERN (insn);
4095	  int i = GET_CODE (pat) == PARALLEL ? XVECLEN (pat, 0) : 0;
4096	  set = pat;
4097	  for (;;)
4098	    {
4099	      if (GET_CODE (set) == SET)
4100		{
4101		  if (GET_CODE (SET_DEST (set)) == REG
4102		      && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4103		    {
4104		      found_orig_dest = 1;
4105		      break;
4106		    }
4107		  else if (GET_CODE (SET_DEST (set)) == SUBREG
4108			   && SUBREG_REG (SET_DEST (set)) == orig_dest)
4109		    {
4110		      found_split_dest = 1;
4111		      break;
4112		    }
4113		}
4114	      if (--i < 0)
4115		break;
4116	      set = XVECEXP (pat, 0, i);
4117	    }
4118
4119	  if (insn == last)
4120	    break;
4121	}
4122
4123      if (found_split_dest)
4124	{
4125	  /* Search backwards from FIRST, looking for the first insn that uses
4126	     the original dest.  Stop if we pass a CODE_LABEL or a JUMP_INSN.
4127	     If we find an insn, and it has a REG_DEAD note, then delete the
4128	     note.  */
4129
4130	  for (insn = first; insn; insn = PREV_INSN (insn))
4131	    {
4132	      if (GET_CODE (insn) == CODE_LABEL
4133		  || GET_CODE (insn) == JUMP_INSN)
4134		break;
4135	      else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4136		       && reg_mentioned_p (orig_dest, insn))
4137		{
4138		  note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4139		  if (note)
4140		    remove_note (insn, note);
4141		}
4142	    }
4143	}
4144      else if (! found_orig_dest)
4145	{
4146	  int i, regno;
4147
4148	  /* Should never reach here for a pseudo reg.  */
4149	  if (REGNO (orig_dest) >= FIRST_PSEUDO_REGISTER)
4150	    abort ();
4151
4152	  /* This can happen for a hard register, if the splitter
4153	     does not bother to emit instructions which would be no-ops.
4154	     We try to verify that this is the case by checking to see if
4155	     the original instruction uses all of the registers that it
4156	     set.  This case is OK, because deleting a no-op can not affect
4157	     REG_DEAD notes on other insns.  If this is not the case, then
4158	     abort.  */
4159
4160	  regno = REGNO (orig_dest);
4161	  for (i = HARD_REGNO_NREGS (regno, GET_MODE (orig_dest)) - 1;
4162	       i >= 0; i--)
4163	    if (! refers_to_regno_p (regno + i, regno + i + 1, orig_insn,
4164				     NULL_PTR))
4165	      break;
4166	  if (i >= 0)
4167	    abort ();
4168	}
4169    }
4170
4171  /* Update reg_n_sets.  This is necessary to prevent local alloc from
4172     converting REG_EQUAL notes to REG_EQUIV when splitting has modified
4173     a reg from set once to set multiple times.  */
4174
4175  {
4176    rtx x = PATTERN (orig_insn);
4177    RTX_CODE code = GET_CODE (x);
4178
4179    if (code == SET || code == CLOBBER)
4180      update_n_sets (x, -1);
4181    else if (code == PARALLEL)
4182      {
4183	int i;
4184	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4185	  {
4186	    code = GET_CODE (XVECEXP (x, 0, i));
4187	    if (code == SET || code == CLOBBER)
4188	      update_n_sets (XVECEXP (x, 0, i), -1);
4189	  }
4190      }
4191
4192    for (insn = first; ; insn = NEXT_INSN (insn))
4193      {
4194	x = PATTERN (insn);
4195	code = GET_CODE (x);
4196
4197	if (code == SET || code == CLOBBER)
4198	  update_n_sets (x, 1);
4199	else if (code == PARALLEL)
4200	  {
4201	    int i;
4202	    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4203	      {
4204		code = GET_CODE (XVECEXP (x, 0, i));
4205		if (code == SET || code == CLOBBER)
4206		  update_n_sets (XVECEXP (x, 0, i), 1);
4207	      }
4208	  }
4209
4210	if (insn == last)
4211	  break;
4212      }
4213  }
4214}
4215
4216/* The one entry point in this file.  DUMP_FILE is the dump file for
4217   this pass.  */
4218
4219void
4220schedule_insns (dump_file)
4221     FILE *dump_file;
4222{
4223  int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
4224  int b;
4225  rtx insn;
4226
4227  /* Taking care of this degenerate case makes the rest of
4228     this code simpler.  */
4229  if (n_basic_blocks == 0)
4230    return;
4231
4232  /* Create an insn here so that we can hang dependencies off of it later.  */
4233  sched_before_next_call
4234    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
4235		    NULL_RTX, 0, NULL_RTX, NULL_RTX);
4236
4237  /* Initialize the unused_*_lists.  We can't use the ones left over from
4238     the previous function, because gcc has freed that memory.  We can use
4239     the ones left over from the first sched pass in the second pass however,
4240     so only clear them on the first sched pass.  The first pass is before
4241     reload if flag_schedule_insns is set, otherwise it is afterwards.  */
4242
4243  if (reload_completed == 0 || ! flag_schedule_insns)
4244    {
4245      unused_insn_list = 0;
4246      unused_expr_list = 0;
4247    }
4248
4249  /* We create no insns here, only reorder them, so we
4250     remember how far we can cut back the stack on exit.  */
4251
4252  /* Allocate data for this pass.  See comments, above,
4253     for what these vectors do.
4254
4255     We use xmalloc instead of alloca, because max_uid can be very large
4256     when there is a lot of function inlining.  If we used alloca, we could
4257     exceed stack limits on some hosts for some inputs.  */
4258  insn_luid = (int *) xmalloc (max_uid * sizeof (int));
4259  insn_priority = (int *) xmalloc (max_uid * sizeof (int));
4260  insn_tick = (int *) xmalloc (max_uid * sizeof (int));
4261  insn_costs = (short *) xmalloc (max_uid * sizeof (short));
4262  insn_units = (short *) xmalloc (max_uid * sizeof (short));
4263  insn_blockage = (unsigned int *) xmalloc (max_uid * sizeof (unsigned int));
4264  insn_ref_count = (int *) xmalloc (max_uid * sizeof (int));
4265
4266  if (reload_completed == 0)
4267    {
4268      sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4269      sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4270      bb_dead_regs = ALLOCA_REG_SET ();
4271      bb_live_regs = ALLOCA_REG_SET ();
4272      bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
4273      bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
4274    }
4275  else
4276    {
4277      sched_reg_n_calls_crossed = 0;
4278      sched_reg_live_length = 0;
4279      bb_dead_regs = 0;
4280      bb_live_regs = 0;
4281    }
4282  init_alias_analysis ();
4283
4284  if (write_symbols != NO_DEBUG)
4285    {
4286      rtx line;
4287
4288      line_note = (rtx *) xmalloc (max_uid * sizeof (rtx));
4289      bzero ((char *) line_note, max_uid * sizeof (rtx));
4290      line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4291      bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
4292
4293      /* Determine the line-number at the start of each basic block.
4294	 This must be computed and saved now, because after a basic block's
4295	 predecessor has been scheduled, it is impossible to accurately
4296	 determine the correct line number for the first insn of the block.  */
4297
4298      for (b = 0; b < n_basic_blocks; b++)
4299	for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
4300	  if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4301	    {
4302	      line_note_head[b] = line;
4303	      break;
4304	    }
4305    }
4306
4307  bzero ((char *) insn_luid, max_uid * sizeof (int));
4308  bzero ((char *) insn_priority, max_uid * sizeof (int));
4309  bzero ((char *) insn_tick, max_uid * sizeof (int));
4310  bzero ((char *) insn_costs, max_uid * sizeof (short));
4311  bzero ((char *) insn_units, max_uid * sizeof (short));
4312  bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
4313  bzero ((char *) insn_ref_count, max_uid * sizeof (int));
4314
4315  /* Schedule each basic block, block by block.  */
4316
4317  /* ??? Add a NOTE after the last insn of the last basic block.  It is not
4318     known why this is done.  */
4319  /* ??? Perhaps it's done to ensure NEXT_TAIL in schedule_block is a
4320     valid insn.  */
4321
4322  insn = BLOCK_END (n_basic_blocks-1);
4323  if (NEXT_INSN (insn) == 0
4324      || (GET_CODE (insn) != NOTE
4325	  && GET_CODE (insn) != CODE_LABEL
4326	  /* Don't emit a NOTE if it would end up between an unconditional
4327	     jump and a BARRIER.  */
4328	  && ! (GET_CODE (insn) == JUMP_INSN
4329		&& GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4330    emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks-1));
4331
4332  for (b = 0; b < n_basic_blocks; b++)
4333    {
4334      note_list = 0;
4335
4336      split_block_insns (b, reload_completed == 0 || ! flag_schedule_insns);
4337
4338      schedule_block (b, dump_file);
4339
4340#ifdef USE_C_ALLOCA
4341      alloca (0);
4342#endif
4343    }
4344
4345  /* Reposition the prologue and epilogue notes in case we moved the
4346     prologue/epilogue insns.  */
4347  if (reload_completed)
4348    reposition_prologue_and_epilogue_notes (get_insns ());
4349
4350  if (write_symbols != NO_DEBUG)
4351    {
4352      rtx line = 0;
4353      rtx insn = get_insns ();
4354      int active_insn = 0;
4355      int notes = 0;
4356
4357      /* Walk the insns deleting redundant line-number notes.  Many of these
4358	 are already present.  The remainder tend to occur at basic
4359	 block boundaries.  */
4360      for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4361	if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4362	  {
4363	    /* If there are no active insns following, INSN is redundant.  */
4364	    if (active_insn == 0)
4365	      {
4366		notes++;
4367		NOTE_SOURCE_FILE (insn) = 0;
4368		NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4369	      }
4370	    /* If the line number is unchanged, LINE is redundant.  */
4371	    else if (line
4372		     && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4373		     && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4374	      {
4375		notes++;
4376		NOTE_SOURCE_FILE (line) = 0;
4377		NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4378		line = insn;
4379	      }
4380	    else
4381	      line = insn;
4382	    active_insn = 0;
4383	  }
4384	else if (! ((GET_CODE (insn) == NOTE
4385		     && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4386		    || (GET_CODE (insn) == INSN
4387			&& (GET_CODE (PATTERN (insn)) == USE
4388			    || GET_CODE (PATTERN (insn)) == CLOBBER))))
4389	  active_insn++;
4390
4391      if (dump_file && notes)
4392	fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4393    }
4394
4395  if (reload_completed == 0)
4396    {
4397      int regno;
4398      for (regno = 0; regno < max_regno; regno++)
4399	if (sched_reg_live_length[regno])
4400	  {
4401	    if (dump_file)
4402	      {
4403		if (REG_LIVE_LENGTH (regno) > sched_reg_live_length[regno])
4404		  fprintf (dump_file,
4405			   ";; register %d life shortened from %d to %d\n",
4406			   regno, REG_LIVE_LENGTH (regno),
4407			   sched_reg_live_length[regno]);
4408		/* Negative values are special; don't overwrite the current
4409		   reg_live_length value if it is negative.  */
4410		else if (REG_LIVE_LENGTH (regno) < sched_reg_live_length[regno]
4411			 && REG_LIVE_LENGTH (regno) >= 0)
4412		  fprintf (dump_file,
4413			   ";; register %d life extended from %d to %d\n",
4414			   regno, REG_LIVE_LENGTH (regno),
4415			   sched_reg_live_length[regno]);
4416
4417		if (! REG_N_CALLS_CROSSED (regno)
4418		    && sched_reg_n_calls_crossed[regno])
4419		  fprintf (dump_file,
4420			   ";; register %d now crosses calls\n", regno);
4421		else if (REG_N_CALLS_CROSSED (regno)
4422			 && ! sched_reg_n_calls_crossed[regno]
4423			 && REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4424		  fprintf (dump_file,
4425			   ";; register %d no longer crosses calls\n", regno);
4426
4427	      }
4428	    /* Negative values are special; don't overwrite the current
4429	       reg_live_length value if it is negative.  */
4430	    if (REG_LIVE_LENGTH (regno) >= 0)
4431	      REG_LIVE_LENGTH (regno) = sched_reg_live_length[regno];
4432
4433	    /* We can't change the value of reg_n_calls_crossed to zero for
4434	       pseudos which are live in more than one block.
4435
4436	       This is because combine might have made an optimization which
4437	       invalidated basic_block_live_at_start and reg_n_calls_crossed,
4438	       but it does not update them.  If we update reg_n_calls_crossed
4439	       here, the two variables are now inconsistent, and this might
4440	       confuse the caller-save code into saving a register that doesn't
4441	       need to be saved.  This is only a problem when we zero calls
4442	       crossed for a pseudo live in multiple basic blocks.
4443
4444	       Alternatively, we could try to correctly update basic block live
4445	       at start here in sched, but that seems complicated.  */
4446	    if (sched_reg_n_calls_crossed[regno]
4447		|| REG_BASIC_BLOCK (regno) != REG_BLOCK_GLOBAL)
4448	      REG_N_CALLS_CROSSED (regno) = sched_reg_n_calls_crossed[regno];
4449	  }
4450    }
4451
4452  free (insn_luid);
4453  free (insn_priority);
4454  free (insn_tick);
4455  free (insn_costs);
4456  free (insn_units);
4457  free (insn_blockage);
4458  free (insn_ref_count);
4459
4460  if (write_symbols != NO_DEBUG)
4461    free (line_note);
4462
4463  if (reload_completed == 0)
4464    {
4465      FREE_REG_SET (bb_dead_regs);
4466      FREE_REG_SET (bb_live_regs);
4467    }
4468
4469}
4470#endif /* INSN_SCHEDULING */
4471