sched-deps.c revision 119256
1/* Instruction scheduling pass.  This file computes dependencies between
2   instructions.
3   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
6   and currently maintained by, Jim Wilson (wilson@cygnus.com).
7
8This file is part of GCC.
9
10GCC is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free
12Software Foundation; either version 2, or (at your option) any later
13version.
14
15GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16WARRANTY; without even the implied warranty of MERCHANTABILITY or
17FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18for more details.
19
20You should have received a copy of the GNU General Public License
21along with GCC; see the file COPYING.  If not, write to the Free
22Software Foundation, 59 Temple Place - Suite 330, Boston, MA
2302111-1307, USA.  */
24
25#include "config.h"
26#include "system.h"
27#include "toplev.h"
28#include "rtl.h"
29#include "tm_p.h"
30#include "hard-reg-set.h"
31#include "basic-block.h"
32#include "regs.h"
33#include "function.h"
34#include "flags.h"
35#include "insn-config.h"
36#include "insn-attr.h"
37#include "except.h"
38#include "toplev.h"
39#include "recog.h"
40#include "sched-int.h"
41#include "params.h"
42#include "cselib.h"
43
44extern char *reg_known_equiv_p;
45extern rtx *reg_known_value;
46
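/* The following accumulate, for the single insn currently being analyzed,
   the registers it sets, clobbers and uses, plus whether it acts as a
   scheduling barrier; sched_analyze_insn merges them into the deps
   structure and clears them afterwards.  */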
47static regset_head reg_pending_sets_head;
48static regset_head reg_pending_clobbers_head;
49static regset_head reg_pending_uses_head;
50
51static regset reg_pending_sets;
52static regset reg_pending_clobbers;
53static regset reg_pending_uses;
54static bool reg_pending_barrier;
55
56/* To speed up the test for duplicate dependency links we keep a
57   record of dependencies created by add_dependence when the average
58   number of instructions in a basic block is very large.
59
60   Studies have shown that there are typically around 5 instructions between
61   branches in typical C code.  So we can guess that the average basic
62   block is approximately 5 instructions long; we consider a basic block
63   100 times that size to be very large.
64
65   Each insn has associated bitmaps for its dependencies.  Each bitmap
66   has enough entries to represent a dependency on any other insn in
67   the insn chain.  Whenever the bitmaps for the true-dependency cache
68   are allocated, the other two caches are allocated as well.  */
69static sbitmap *true_dependency_cache;
70static sbitmap *anti_dependency_cache;
71static sbitmap *output_dependency_cache;
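/* For example, TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
   INSN_LUID (elem)) is nonzero when a true dependence of INSN on ELEM has
   already been recorded, which lets add_dependence skip its LOG_LINKS
   walk.  */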
72
73/* To speed up checking the consistency of the forward insn dependencies
74   we have formed, we use the following cache.  An alternative would be
75   to switch off the check for duplicate insns in the forward
76   dependencies.  */
77#ifdef ENABLE_CHECKING
78static sbitmap *forward_dependency_cache;
79#endif
80
81static int deps_may_trap_p PARAMS ((rtx));
82static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
83static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
84static void remove_dependence PARAMS ((rtx, rtx));
85static void set_sched_group_p PARAMS ((rtx));
86
87static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
88static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
89static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
90static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
91static rtx group_leader PARAMS ((rtx));
92
93static rtx get_condition PARAMS ((rtx));
94static int conditions_mutex_p PARAMS ((rtx, rtx));
95
96/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
97
98static int
99deps_may_trap_p (mem)
100     rtx mem;
101{
102  rtx addr = XEXP (mem, 0);
103
104  if (REG_P (addr)
105      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
106      && reg_known_value[REGNO (addr)])
107    addr = reg_known_value[REGNO (addr)];
108  return rtx_addr_can_trap_p (addr);
109}
110
111/* Return the INSN_LIST containing INSN in LIST, or NULL
112   if LIST does not contain INSN.  */
113
114rtx
115find_insn_list (insn, list)
116     rtx insn;
117     rtx list;
118{
119  while (list)
120    {
121      if (XEXP (list, 0) == insn)
122	return list;
123      list = XEXP (list, 1);
124    }
125  return 0;
126}
127
128/* Find the condition under which INSN is executed.  */
129
130static rtx
131get_condition (insn)
132     rtx insn;
133{
134  rtx pat = PATTERN (insn);
135  rtx cond;
136
137  if (pat == 0)
138    return 0;
139  if (GET_CODE (pat) == COND_EXEC)
140    return COND_EXEC_TEST (pat);
141  if (GET_CODE (insn) != JUMP_INSN)
142    return 0;
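  /* For a conditional jump the pattern is
     (set (pc) (if_then_else COND (label_ref L) (pc)))
     or the same form with the two arms interchanged.  */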
143  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
144    return 0;
145  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
146    return 0;
147  pat = SET_SRC (pat);
148  cond = XEXP (pat, 0);
149  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
150      && XEXP (pat, 2) == pc_rtx)
151    return cond;
152  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
153	   && XEXP (pat, 1) == pc_rtx)
154    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
155			   XEXP (cond, 0), XEXP (cond, 1));
156  else
157    return 0;
158}
159
160/* Return nonzero if conditions COND1 and COND2 can never both be true.  */
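/* For example, (eq (reg 1) (const_int 0)) and (ne (reg 1) (const_int 0))
   can never both be true.  */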
161
162static int
163conditions_mutex_p (cond1, cond2)
164     rtx cond1, cond2;
165{
166  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
167      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
168      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
169      && XEXP (cond1, 0) == XEXP (cond2, 0)
170      && XEXP (cond1, 1) == XEXP (cond2, 1))
171    return 1;
172  return 0;
173}
174
175/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
176   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
177   of dependence that this link represents.  */
178
179void
180add_dependence (insn, elem, dep_type)
181     rtx insn;
182     rtx elem;
183     enum reg_note dep_type;
184{
185  rtx link, next;
186  int present_p;
187  rtx cond1, cond2;
188
189  /* Don't depend an insn on itself.  */
190  if (insn == elem)
191    return;
192
193  /* We can get a dependency on deleted insns due to optimizations in
194     the register allocation and reloading or due to splitting.  Any
195     such dependency is useless and can be ignored.  */
196  if (GET_CODE (elem) == NOTE)
197    return;
198
199  /* flow.c doesn't handle conditional lifetimes entirely correctly;
200     calls mess up the conditional lifetimes.  */
201  /* ??? add_dependence is the wrong place to be eliding dependencies,
202     as that forgets that the condition expressions themselves may
203     be dependent.  */
204  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
205    {
206      cond1 = get_condition (insn);
207      cond2 = get_condition (elem);
208      if (cond1 && cond2
209	  && conditions_mutex_p (cond1, cond2)
210	  /* Make sure the first instruction doesn't affect the condition of
211	     the second instruction if switched.  */
212	  && !modified_in_p (cond1, elem)
213	  /* Make sure the second instruction doesn't affect the condition of
214	     the first instruction if switched.  */
215	  && !modified_in_p (cond2, insn))
216	return;
217    }
218
219  /* If elem is part of a sequence that must be scheduled together, then
220     make the dependence point to the last insn of the sequence.
221     When HAVE_cc0, it is possible for NOTEs to exist between users and
222     setters of the condition codes, so we must skip past notes here.
223     Otherwise, NOTEs are impossible here.  */
224  next = next_nonnote_insn (elem);
225  if (next && INSN_P (next) && SCHED_GROUP_P (next))
226    {
227      /* Notes can in fact intervene here, despite the old claim to the
228         contrary; that is why the loop below uses next_nonnote_insn to
229         skip over them.  */
230      /* We must reject CODE_LABELs, so that we don't get confused by one
231         that has LABEL_PRESERVE_P set, which is represented by the same
232         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
233         SCHED_GROUP_P.  */
234
235      rtx nnext;
236      while ((nnext = next_nonnote_insn (next)) != NULL
237	     && INSN_P (nnext)
238	     && SCHED_GROUP_P (nnext))
239	next = nnext;
240
241      /* Again, don't depend an insn on itself.  */
242      if (insn == next)
243	return;
244
245      /* Make the dependence to NEXT, the last insn of the group, instead
246         of the original ELEM.  */
247      elem = next;
248    }
249
250  present_p = 1;
251#ifdef INSN_SCHEDULING
252  /* ??? No good way to tell from here whether we're doing interblock
253     scheduling.  Possibly add another callback.  */
254#if 0
255  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
256     No need for interblock dependences with calls, since
257     calls are not moved between blocks.   Note: the edge where
258     elem is a CALL is still required.  */
259  if (GET_CODE (insn) == CALL_INSN
260      && (INSN_BB (elem) != INSN_BB (insn)))
261    return;
262#endif
263
264  /* If we already have a dependency for ELEM, then we do not need to
265     do anything.  Avoiding the list walk below can cut compile times
266     dramatically for some code.  */
267  if (true_dependency_cache != NULL)
268    {
269      enum reg_note present_dep_type = 0;
270
271      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
272	abort ();
273      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
274	/* Do nothing (present_dep_type is already 0).  */
275	;
276      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
277			 INSN_LUID (elem)))
278	present_dep_type = REG_DEP_ANTI;
279      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
280			 INSN_LUID (elem)))
281	present_dep_type = REG_DEP_OUTPUT;
282      else
283	present_p = 0;
284      if (present_p && (int) dep_type >= (int) present_dep_type)
285	return;
286    }
287#endif
288
289  /* Check that we don't already have this dependence.  */
290  if (present_p)
291    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
292      if (XEXP (link, 0) == elem)
293	{
294#ifdef INSN_SCHEDULING
295	  /* Clear corresponding cache entry because type of the link
296             may be changed.  */
297	  if (true_dependency_cache != NULL)
298	    {
299	      if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
300		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
301			   INSN_LUID (elem));
302	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
303		       && output_dependency_cache)
304		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
305			   INSN_LUID (elem));
306	      else
307		abort ();
308	    }
309#endif
310
311	  /* If this is a more restrictive type of dependence than the existing
312	     one, then change the existing dependence to this type.  */
313	  if ((int) dep_type < (int) REG_NOTE_KIND (link))
314	    PUT_REG_NOTE_KIND (link, dep_type);
315
316#ifdef INSN_SCHEDULING
317	  /* If we are adding a dependency to INSN's LOG_LINKs, then
318	     note that in the bitmap caches of dependency information.  */
319	  if (true_dependency_cache != NULL)
320	    {
321	      if ((int) REG_NOTE_KIND (link) == 0)
322		SET_BIT (true_dependency_cache[INSN_LUID (insn)],
323			 INSN_LUID (elem));
324	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
325		SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
326			 INSN_LUID (elem));
327	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
328		SET_BIT (output_dependency_cache[INSN_LUID (insn)],
329			 INSN_LUID (elem));
330	    }
331#endif
332	  return;
333	}
334  /* Might want to check one level of transitivity to save conses.  */
335
336  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
337  LOG_LINKS (insn) = link;
338
339  /* Insn dependency, not data dependency.  */
340  PUT_REG_NOTE_KIND (link, dep_type);
341
342#ifdef INSN_SCHEDULING
343  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
344     in the bitmap caches of dependency information.  */
345  if (true_dependency_cache != NULL)
346    {
347      if ((int) dep_type == 0)
348	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
349      else if (dep_type == REG_DEP_ANTI)
350	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
351      else if (dep_type == REG_DEP_OUTPUT)
352	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
353    }
354#endif
355}
356
357/* A convenience wrapper to operate on an entire list.  */
358
359static void
360add_dependence_list (insn, list, dep_type)
361     rtx insn, list;
362     enum reg_note dep_type;
363{
364  for (; list; list = XEXP (list, 1))
365    add_dependence (insn, XEXP (list, 0), dep_type);
366}
367
368/* Similar, but free *LISTP at the same time.  */
369
370static void
371add_dependence_list_and_free (insn, listp, dep_type)
372     rtx insn;
373     rtx *listp;
374     enum reg_note dep_type;
375{
376  rtx list, next;
377  for (list = *listp, *listp = NULL; list ; list = next)
378    {
379      next = XEXP (list, 1);
380      add_dependence (insn, XEXP (list, 0), dep_type);
381      free_INSN_LIST_node (list);
382    }
383}
384
385/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
386   of INSN.  Abort if not found.  */
387
388static void
389remove_dependence (insn, elem)
390     rtx insn;
391     rtx elem;
392{
393  rtx prev, link, next;
394  int found = 0;
395
396  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
397    {
398      next = XEXP (link, 1);
399      if (XEXP (link, 0) == elem)
400	{
401	  if (prev)
402	    XEXP (prev, 1) = next;
403	  else
404	    LOG_LINKS (insn) = next;
405
406#ifdef INSN_SCHEDULING
407	  /* If we are removing a dependency from the LOG_LINKS list,
408	     make sure to remove it from the cache too.  */
409	  if (true_dependency_cache != NULL)
410	    {
411	      if (REG_NOTE_KIND (link) == 0)
412		RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
413			   INSN_LUID (elem));
414	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
415		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
416			   INSN_LUID (elem));
417	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
418		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
419			   INSN_LUID (elem));
420	    }
421#endif
422
423	  free_INSN_LIST_node (link);
424
425	  found = 1;
426	}
427      else
428	prev = link;
429    }
430
431  if (!found)
432    abort ();
433  return;
434}
435
436/* Return the last insn of the scheduling group that contains INSN;
437   that insn represents the whole group.  */
438
439static rtx
440group_leader (insn)
441     rtx insn;
442{
443  rtx prev;
444
445  do
446    {
447      prev = insn;
448      insn = next_nonnote_insn (insn);
449    }
450  while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));
451
452  return prev;
453}
454
455/* Set SCHED_GROUP_P and take care of the rest of the bookkeeping that
456   goes along with that.  */
457
458static void
459set_sched_group_p (insn)
460     rtx insn;
461{
462  rtx link, prev;
463
464  SCHED_GROUP_P (insn) = 1;
465
466  /* There may be a note before this insn now, but all notes will
467     be removed before we actually try to schedule the insns, so
468     it won't cause a problem later.  We must avoid it here though.  */
469  prev = prev_nonnote_insn (insn);
470
471  /* Make a copy of all dependencies on the immediately previous insn,
472     and add to this insn.  This is so that all the dependencies will
473     apply to the group.  Remove an explicit dependence on this insn
474     as SCHED_GROUP_P now represents it.  */
475
476  if (find_insn_list (prev, LOG_LINKS (insn)))
477    remove_dependence (insn, prev);
478
479  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
480    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
481}
482
483/* Process an insn's memory dependencies.  There are four kinds of
484   dependencies:
485
486   (0) read dependence: read follows read
487   (1) true dependence: read follows write
488   (2) anti dependence: write follows read
489   (3) output dependence: write follows write
490
491   We are careful to build only dependencies which actually exist, and
492   use transitivity to avoid building too many links.  */
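/* As an illustration (not actual rtl), assuming both references are to
   the same memory location:

     r1 = MEM; ...; r2 = MEM;     (0) read dependence
     MEM = r1; ...; r2 = MEM;     (1) true dependence
     r1 = MEM; ...; MEM = r2;     (2) anti dependence
     MEM = r1; ...; MEM = r2;     (3) output dependence  */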
493
494/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
495   The MEM is a memory reference contained within INSN, which we are saving
496   so that we can do memory aliasing on it.  */
497
498void
499add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
500     struct deps *deps;
501     rtx *insn_list, *mem_list, insn, mem;
502{
503  rtx link;
504
505  link = alloc_INSN_LIST (insn, *insn_list);
506  *insn_list = link;
507
508  if (current_sched_info->use_cselib)
509    {
510      mem = shallow_copy_rtx (mem);
511      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
512    }
513  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
514  *mem_list = link;
515
516  deps->pending_lists_length++;
517}
518
519/* Make a dependency between every memory reference on the pending lists
520   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
521   dependencies for a read operation, similarly with FOR_WRITE.  */
522
523static void
524flush_pending_lists (deps, insn, for_read, for_write)
525     struct deps *deps;
526     rtx insn;
527     int for_read, for_write;
528{
529  if (for_write)
530    {
531      add_dependence_list_and_free (insn, &deps->pending_read_insns,
532				    REG_DEP_ANTI);
533      free_EXPR_LIST_list (&deps->pending_read_mems);
534    }
535
536  add_dependence_list_and_free (insn, &deps->pending_write_insns,
537				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
538  free_EXPR_LIST_list (&deps->pending_write_mems);
539  deps->pending_lists_length = 0;
540
541  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
542				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
543  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
544  deps->pending_flush_length = 1;
545}
546
547/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
548   rtx, X, creating all dependencies generated by the write to the
549   destination of X, and reads of everything mentioned.  */
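/* For example, for (set (mem:SI (reg A)) (reg B)) this records a pending
   write of that memory location, a use of reg A via the address, and a
   use of reg B via sched_analyze_2 on the SET_SRC.  */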
550
551static void
552sched_analyze_1 (deps, x, insn)
553     struct deps *deps;
554     rtx x;
555     rtx insn;
556{
557  int regno;
558  rtx dest = XEXP (x, 0);
559  enum rtx_code code = GET_CODE (x);
560
561  if (dest == 0)
562    return;
563
564  if (GET_CODE (dest) == PARALLEL)
565    {
566      int i;
567
568      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
569	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
570	  sched_analyze_1 (deps,
571			   gen_rtx_CLOBBER (VOIDmode,
572					    XEXP (XVECEXP (dest, 0, i), 0)),
573			   insn);
574
575      if (GET_CODE (x) == SET)
576	sched_analyze_2 (deps, SET_SRC (x), insn);
577      return;
578    }
579
580  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
581	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
582    {
583      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
584	{
585	  /* The second and third arguments are values read by this insn.  */
586	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
587	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
588	}
589      dest = XEXP (dest, 0);
590    }
591
592  if (GET_CODE (dest) == REG)
593    {
594      regno = REGNO (dest);
595
596      /* A hard reg in a wide mode may really be multiple registers.
597         If so, mark all of them just like the first.  */
598      if (regno < FIRST_PSEUDO_REGISTER)
599	{
600	  int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
601	  if (code == SET)
602	    {
603	      while (--i >= 0)
604		SET_REGNO_REG_SET (reg_pending_sets, regno + i);
605	    }
606	  else
607	    {
608	      while (--i >= 0)
609		SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
610	    }
611	}
612      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
613	 it does not reload.  Ignore these as they have served their
614	 purpose already.  */
615      else if (regno >= deps->max_reg)
616	{
617	  if (GET_CODE (PATTERN (insn)) != USE
618	      && GET_CODE (PATTERN (insn)) != CLOBBER)
619	    abort ();
620	}
621      else
622	{
623	  if (code == SET)
624	    SET_REGNO_REG_SET (reg_pending_sets, regno);
625	  else
626	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);
627
628	  /* Pseudos that are REG_EQUIV to something may be replaced
629	     by that during reloading.  We need only add dependencies for
630	     the address in the REG_EQUIV note.  */
631	  if (!reload_completed
632	      && reg_known_equiv_p[regno]
633	      && GET_CODE (reg_known_value[regno]) == MEM)
634	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
635
636	  /* Don't let it cross a call after scheduling if it doesn't
637	     already cross one.  */
638	  if (REG_N_CALLS_CROSSED (regno) == 0)
639	    add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
640	}
641    }
642  else if (GET_CODE (dest) == MEM)
643    {
644      /* Writing memory.  */
645      rtx t = dest;
646
647      if (current_sched_info->use_cselib)
648	{
649	  t = shallow_copy_rtx (dest);
650	  cselib_lookup (XEXP (t, 0), Pmode, 1);
651	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
652	}
653
654      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
655	{
656	  /* Flush all pending reads and writes to prevent the pending lists
657	     from getting any larger.  Insn scheduling runs too slowly when
658	     these lists get long.  When compiling GCC with itself,
659	     this flush occurs 8 times for sparc, and 10 times for m88k using
660	     the default value of 32.  */
661	  flush_pending_lists (deps, insn, false, true);
662	}
663      else
664	{
665	  rtx pending, pending_mem;
666
667	  pending = deps->pending_read_insns;
668	  pending_mem = deps->pending_read_mems;
669	  while (pending)
670	    {
671	      if (anti_dependence (XEXP (pending_mem, 0), t))
672		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
673
674	      pending = XEXP (pending, 1);
675	      pending_mem = XEXP (pending_mem, 1);
676	    }
677
678	  pending = deps->pending_write_insns;
679	  pending_mem = deps->pending_write_mems;
680	  while (pending)
681	    {
682	      if (output_dependence (XEXP (pending_mem, 0), t))
683		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
684
685	      pending = XEXP (pending, 1);
686	      pending_mem = XEXP (pending_mem, 1);
687	    }
688
689	  add_dependence_list (insn, deps->last_pending_memory_flush,
690			       REG_DEP_ANTI);
691
692	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
693				   &deps->pending_write_mems, insn, dest);
694	}
695      sched_analyze_2 (deps, XEXP (dest, 0), insn);
696    }
697
698  /* Analyze reads.  */
699  if (GET_CODE (x) == SET)
700    sched_analyze_2 (deps, SET_SRC (x), insn);
701}
702
703/* Analyze the uses of memory and registers in rtx X in INSN.  */
704
705static void
706sched_analyze_2 (deps, x, insn)
707     struct deps *deps;
708     rtx x;
709     rtx insn;
710{
711  int i;
712  int j;
713  enum rtx_code code;
714  const char *fmt;
715
716  if (x == 0)
717    return;
718
719  code = GET_CODE (x);
720
721  switch (code)
722    {
723    case CONST_INT:
724    case CONST_DOUBLE:
725    case CONST_VECTOR:
726    case SYMBOL_REF:
727    case CONST:
728    case LABEL_REF:
729      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
730         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
731         this does not mean that this insn is using cc0.  */
732      return;
733
734#ifdef HAVE_cc0
735    case CC0:
736      /* User of CC0 depends on immediately preceding insn.  */
737      set_sched_group_p (insn);
738      return;
739#endif
740
741    case REG:
742      {
743	int regno = REGNO (x);
744	if (regno < FIRST_PSEUDO_REGISTER)
745	  {
746	    int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
747	    while (--i >= 0)
748	      SET_REGNO_REG_SET (reg_pending_uses, regno + i);
749	  }
750	/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
751	   it does not reload.  Ignore these as they have served their
752	   purpose already.  */
753	else if (regno >= deps->max_reg)
754	  {
755	    if (GET_CODE (PATTERN (insn)) != USE
756		&& GET_CODE (PATTERN (insn)) != CLOBBER)
757	      abort ();
758	  }
759	else
760	  {
761	    SET_REGNO_REG_SET (reg_pending_uses, regno);
762
763	    /* Pseudos that are REG_EQUIV to something may be replaced
764	       by that during reloading.  We need only add dependencies for
765	       the address in the REG_EQUIV note.  */
766	    if (!reload_completed
767		&& reg_known_equiv_p[regno]
768		&& GET_CODE (reg_known_value[regno]) == MEM)
769	      sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
770
771	    /* If the register does not already cross any calls, then add this
772	       insn to the sched_before_next_call list so that it will still
773	       not cross calls after scheduling.  */
774	    if (REG_N_CALLS_CROSSED (regno) == 0)
775	      deps->sched_before_next_call
776		= alloc_INSN_LIST (insn, deps->sched_before_next_call);
777	  }
778	return;
779      }
780
781    case MEM:
782      {
783	/* Reading memory.  */
784	rtx u;
785	rtx pending, pending_mem;
786	rtx t = x;
787
788	if (current_sched_info->use_cselib)
789	  {
790	    t = shallow_copy_rtx (t);
791	    cselib_lookup (XEXP (t, 0), Pmode, 1);
792	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
793	  }
794	pending = deps->pending_read_insns;
795	pending_mem = deps->pending_read_mems;
796	while (pending)
797	  {
798	    if (read_dependence (XEXP (pending_mem, 0), t))
799	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
800
801	    pending = XEXP (pending, 1);
802	    pending_mem = XEXP (pending_mem, 1);
803	  }
804
805	pending = deps->pending_write_insns;
806	pending_mem = deps->pending_write_mems;
807	while (pending)
808	  {
809	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
810				 t, rtx_varies_p))
811	      add_dependence (insn, XEXP (pending, 0), 0);
812
813	    pending = XEXP (pending, 1);
814	    pending_mem = XEXP (pending_mem, 1);
815	  }
816
817	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
818	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
819	      || deps_may_trap_p (x))
820	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
821
822	/* Always add these dependencies to pending_reads, since
823	   this insn may be followed by a write.  */
824	add_insn_mem_dependence (deps, &deps->pending_read_insns,
825				 &deps->pending_read_mems, insn, x);
826
827	/* Take advantage of tail recursion here.  */
828	sched_analyze_2 (deps, XEXP (x, 0), insn);
829	return;
830      }
831
832    /* Force pending stores to memory in case a trap handler needs them.  */
833    case TRAP_IF:
834      flush_pending_lists (deps, insn, true, false);
835      break;
836
837    case ASM_OPERANDS:
838    case ASM_INPUT:
839    case UNSPEC_VOLATILE:
840      {
841	/* Traditional and volatile asm instructions must be considered to use
842	   and clobber all hard registers, all pseudo-registers and all of
843	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
844
845	   Consider for instance a volatile asm that changes the fpu rounding
846	   mode.  An insn should not be moved across this even if it only uses
847	   pseudo-regs because it might give an incorrectly rounded result.  */
848	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
849	  reg_pending_barrier = true;
850
851	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
852	   We can not just fall through here since then we would be confused
853	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
854	   traditional asms unlike their normal usage.  */
855
856	if (code == ASM_OPERANDS)
857	  {
858	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
859	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
860	    return;
861	  }
862	break;
863      }
864
865    case PRE_DEC:
866    case POST_DEC:
867    case PRE_INC:
868    case POST_INC:
869      /* These both read and modify the result.  We must handle them as writes
870         to get proper dependencies for following instructions.  We must handle
871         them as reads to get proper dependencies from this to previous
872         instructions.  Thus we need to pass them to both sched_analyze_1
873         and sched_analyze_2.  We must call sched_analyze_2 first in order
874         to get the proper antecedent for the read.  */
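      /* For example, a (pre_dec (reg sp)) appearing inside a MEM counts
         both as a use and as a clobber of the stack pointer register,
         in addition to whatever the containing MEM contributes.  */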
875      sched_analyze_2 (deps, XEXP (x, 0), insn);
876      sched_analyze_1 (deps, x, insn);
877      return;
878
879    case POST_MODIFY:
880    case PRE_MODIFY:
881      /* op0 = op0 <op> op1: op0 is read and written, op1 is read.  */
882      sched_analyze_2 (deps, XEXP (x, 0), insn);
883      sched_analyze_2 (deps, XEXP (x, 1), insn);
884      sched_analyze_1 (deps, x, insn);
885      return;
886
887    default:
888      break;
889    }
890
891  /* Other cases: walk the insn.  */
892  fmt = GET_RTX_FORMAT (code);
893  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
894    {
895      if (fmt[i] == 'e')
896	sched_analyze_2 (deps, XEXP (x, i), insn);
897      else if (fmt[i] == 'E')
898	for (j = 0; j < XVECLEN (x, i); j++)
899	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
900    }
901}
902
903/* Analyze an INSN with pattern X to find all dependencies.  */
904
905static void
906sched_analyze_insn (deps, x, insn, loop_notes)
907     struct deps *deps;
908     rtx x, insn;
909     rtx loop_notes;
910{
911  RTX_CODE code = GET_CODE (x);
912  rtx link;
913  int i;
914
915  if (code == COND_EXEC)
916    {
917      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
918
919      /* ??? Should be recording conditions so we reduce the number of
920	 false dependencies.  */
921      x = COND_EXEC_CODE (x);
922      code = GET_CODE (x);
923    }
924  if (code == SET || code == CLOBBER)
925    {
926      sched_analyze_1 (deps, x, insn);
927
928      /* Bare clobber insns are used for letting life analysis, reg-stack
929	 and others know that a value is dead.  Depend on the last call
930	 instruction so that reg-stack won't get confused.  */
931      if (code == CLOBBER)
932	add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
933    }
934  else if (code == PARALLEL)
935    {
936      int i;
937      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
938	{
939	  rtx sub = XVECEXP (x, 0, i);
940	  code = GET_CODE (sub);
941
942	  if (code == COND_EXEC)
943	    {
944	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
945	      sub = COND_EXEC_CODE (sub);
946	      code = GET_CODE (sub);
947	    }
948	  if (code == SET || code == CLOBBER)
949	    sched_analyze_1 (deps, sub, insn);
950	  else
951	    sched_analyze_2 (deps, sub, insn);
952	}
953    }
954  else
955    sched_analyze_2 (deps, x, insn);
956
957  /* Mark registers CLOBBERED or used by called function.  */
958  if (GET_CODE (insn) == CALL_INSN)
959    {
960      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
961	{
962	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
963	    sched_analyze_1 (deps, XEXP (link, 0), insn);
964	  else
965	    sched_analyze_2 (deps, XEXP (link, 0), insn);
966	}
967      if (find_reg_note (insn, REG_SETJMP, NULL))
968	reg_pending_barrier = true;
969    }
970
971  if (GET_CODE (insn) == JUMP_INSN)
972    {
973      rtx next;
974      next = next_nonnote_insn (insn);
975      if (next && GET_CODE (next) == BARRIER)
976	reg_pending_barrier = true;
977      else
978	{
979	  rtx pending, pending_mem;
980	  regset_head tmp_uses, tmp_sets;
981	  INIT_REG_SET (&tmp_uses);
982	  INIT_REG_SET (&tmp_sets);
983
984	  (*current_sched_info->compute_jump_reg_dependencies)
985	    (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
986	  IOR_REG_SET (reg_pending_uses, &tmp_uses);
987	  IOR_REG_SET (reg_pending_sets, &tmp_sets);
988
989	  CLEAR_REG_SET (&tmp_uses);
990	  CLEAR_REG_SET (&tmp_sets);
991
992	  /* All memory writes and volatile reads must happen before the
993	     jump.  Non-volatile reads need to happen before the jump only
994	     if their result feeds one of the register uses recorded above.  */
995
996	  pending = deps->pending_write_insns;
997	  pending_mem = deps->pending_write_mems;
998	  while (pending)
999	    {
1000	      add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1001	      pending = XEXP (pending, 1);
1002	      pending_mem = XEXP (pending_mem, 1);
1003	    }
1004
1005	  pending = deps->pending_read_insns;
1006	  pending_mem = deps->pending_read_mems;
1007	  while (pending)
1008	    {
1009	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
1010		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
1011	      pending = XEXP (pending, 1);
1012	      pending_mem = XEXP (pending_mem, 1);
1013	    }
1014
1015	  add_dependence_list (insn, deps->last_pending_memory_flush,
1016			       REG_DEP_ANTI);
1017	}
1018    }
1019
1020  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
1021     block, then we must be sure that no instructions are scheduled across it.
1022     Otherwise, the reg_n_refs info (which depends on loop_depth) would
1023     become incorrect.  */
1024  if (loop_notes)
1025    {
1026      rtx link;
1027
1028      /* Update loop_notes with any notes from this insn.  Also determine
1029	 if any of the notes on the list correspond to instruction scheduling
1030	 barriers (loop, eh & setjmp notes, but not range notes).  */
1031      link = loop_notes;
1032      while (XEXP (link, 1))
1033	{
1034	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
1035	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
1036	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
1037	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
1038	    reg_pending_barrier = true;
1039
1040	  link = XEXP (link, 1);
1041	}
1042      XEXP (link, 1) = REG_NOTES (insn);
1043      REG_NOTES (insn) = loop_notes;
1044    }
1045
1046  /* If this instruction can throw an exception, then moving it changes
1047     where block boundaries fall.  This is mighty confusing elsewhere.
1048     Therefore, prevent such an instruction from being moved.  */
1049  if (can_throw_internal (insn))
1050    reg_pending_barrier = true;
1051
1052  /* Add dependencies if a scheduling barrier was found.  */
1053  if (reg_pending_barrier)
1054    {
1055      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1056	{
1057	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1058	    {
1059	      struct deps_reg *reg_last = &deps->reg_last[i];
1060	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1061	      add_dependence_list (insn, reg_last->sets, 0);
1062	      add_dependence_list (insn, reg_last->clobbers, 0);
1063	    });
1064	}
1065      else
1066	{
1067	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1068	    {
1069	      struct deps_reg *reg_last = &deps->reg_last[i];
1070	      add_dependence_list_and_free (insn, &reg_last->uses,
1071					    REG_DEP_ANTI);
1072	      add_dependence_list_and_free (insn, &reg_last->sets, 0);
1073	      add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
1074	      reg_last->uses_length = 0;
1075	      reg_last->clobbers_length = 0;
1076	    });
1077	}
1078
1079      for (i = 0; i < deps->max_reg; i++)
1080	{
1081	  struct deps_reg *reg_last = &deps->reg_last[i];
1082	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1083	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
1084	}
1085
1086      flush_pending_lists (deps, insn, true, true);
1087      CLEAR_REG_SET (&deps->reg_conditional_sets);
1088      reg_pending_barrier = false;
1089    }
1090  else
1091    {
1092      /* If the current insn is conditional, we can't free any
1093	 of the lists.  */
1094      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
1095	{
1096	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1097	    {
1098	      struct deps_reg *reg_last = &deps->reg_last[i];
1099	      add_dependence_list (insn, reg_last->sets, 0);
1100	      add_dependence_list (insn, reg_last->clobbers, 0);
1101	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1102	      reg_last->uses_length++;
1103	    });
1104	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1105	    {
1106	      struct deps_reg *reg_last = &deps->reg_last[i];
1107	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1108	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1109	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1110	      reg_last->clobbers_length++;
1111	    });
1112	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1113	    {
1114	      struct deps_reg *reg_last = &deps->reg_last[i];
1115	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1116	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
1117	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1118	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1119	      SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1120	    });
1121	}
1122      else
1123	{
1124	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
1125	    {
1126	      struct deps_reg *reg_last = &deps->reg_last[i];
1127	      add_dependence_list (insn, reg_last->sets, 0);
1128	      add_dependence_list (insn, reg_last->clobbers, 0);
1129	      reg_last->uses_length++;
1130	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
1131	    });
1132	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
1133	    {
1134	      struct deps_reg *reg_last = &deps->reg_last[i];
1135	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
1136		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
1137		{
1138		  add_dependence_list_and_free (insn, &reg_last->sets,
1139					        REG_DEP_OUTPUT);
1140		  add_dependence_list_and_free (insn, &reg_last->uses,
1141						REG_DEP_ANTI);
1142		  add_dependence_list_and_free (insn, &reg_last->clobbers,
1143						REG_DEP_OUTPUT);
1144		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1145		  reg_last->clobbers_length = 0;
1146		  reg_last->uses_length = 0;
1147		}
1148	      else
1149		{
1150		  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
1151		  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
1152		}
1153	      reg_last->clobbers_length++;
1154	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
1155	    });
1156	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
1157	    {
1158	      struct deps_reg *reg_last = &deps->reg_last[i];
1159	      add_dependence_list_and_free (insn, &reg_last->sets,
1160					    REG_DEP_OUTPUT);
1161	      add_dependence_list_and_free (insn, &reg_last->clobbers,
1162					    REG_DEP_OUTPUT);
1163	      add_dependence_list_and_free (insn, &reg_last->uses,
1164					    REG_DEP_ANTI);
1165	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
1166	      reg_last->uses_length = 0;
1167	      reg_last->clobbers_length = 0;
1168	      CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
1169	    });
1170	}
1171
1172      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
1173      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
1174      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
1175    }
1176  CLEAR_REG_SET (reg_pending_uses);
1177  CLEAR_REG_SET (reg_pending_clobbers);
1178  CLEAR_REG_SET (reg_pending_sets);
1179
1180  /* If we are currently in a libcall scheduling group, then mark the
1181     current insn as being in a scheduling group and that it can not
1182     be moved into a different basic block.  */
1183
1184  if (deps->libcall_block_tail_insn)
1185    {
1186      set_sched_group_p (insn);
1187      CANT_MOVE (insn) = 1;
1188    }
1189
1190  /* If a post-call group is still open, see if it should remain so.
1191     This insn must be a simple move of a hard reg to a pseudo or
1192     vice-versa.
1193
1194     We must avoid moving these insns for correctness on
1195     SMALL_REGISTER_CLASS machines, and for special registers like
1196     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
1197     hard regs for all targets.  */
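  /* For example, on a target that returns values in hard register 0, the
     copy (set (reg 123) (reg 0)) emitted right after a call is kept in
     the post-call group so that it cannot drift away from the call.  */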
1198
1199  if (deps->in_post_call_group_p)
1200    {
1201      rtx tmp, set = single_set (insn);
1202      int src_regno, dest_regno;
1203
1204      if (set == NULL)
1205	goto end_call_group;
1206
1207      tmp = SET_DEST (set);
1208      if (GET_CODE (tmp) == SUBREG)
1209	tmp = SUBREG_REG (tmp);
1210      if (GET_CODE (tmp) == REG)
1211	dest_regno = REGNO (tmp);
1212      else
1213	goto end_call_group;
1214
1215      tmp = SET_SRC (set);
1216      if (GET_CODE (tmp) == SUBREG)
1217	tmp = SUBREG_REG (tmp);
1218      if (GET_CODE (tmp) == REG)
1219	src_regno = REGNO (tmp);
1220      else
1221	goto end_call_group;
1222
1223      if (src_regno < FIRST_PSEUDO_REGISTER
1224	  || dest_regno < FIRST_PSEUDO_REGISTER)
1225	{
1226	  set_sched_group_p (insn);
1227	  CANT_MOVE (insn) = 1;
1228	}
1229      else
1230	{
1231	end_call_group:
1232	  deps->in_post_call_group_p = false;
1233	}
1234    }
1235}
1236
1237/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
1238   for every dependency.  */
1239
1240void
1241sched_analyze (deps, head, tail)
1242     struct deps *deps;
1243     rtx head, tail;
1244{
1245  rtx insn;
1246  rtx loop_notes = 0;
1247
1248  if (current_sched_info->use_cselib)
1249    cselib_init ();
1250
1251  for (insn = head;; insn = NEXT_INSN (insn))
1252    {
1253      rtx link, end_seq, r0, set;
1254
1255      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
1256	{
1257	  /* Clear out the stale LOG_LINKS from flow.  */
1258	  free_INSN_LIST_list (&LOG_LINKS (insn));
1259
1260	  /* Make each JUMP_INSN a scheduling barrier for memory
1261             references.  */
1262	  if (GET_CODE (insn) == JUMP_INSN)
1263	    {
1264	      /* Keep the list a reasonable size.  */
1265	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
1266		flush_pending_lists (deps, insn, true, true);
1267	      else
1268		deps->last_pending_memory_flush
1269		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
1270	    }
1271	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1272	  loop_notes = 0;
1273	}
1274      else if (GET_CODE (insn) == CALL_INSN)
1275	{
1276	  int i;
1277
1278	  CANT_MOVE (insn) = 1;
1279
1280	  /* Clear out the stale LOG_LINKS from flow.  */
1281	  free_INSN_LIST_list (&LOG_LINKS (insn));
1282
1283	  if (find_reg_note (insn, REG_SETJMP, NULL))
1284	    {
1285	      /* This is setjmp.  Assume that all registers, not just
1286		 hard registers, may be clobbered by this call.  */
1287	      reg_pending_barrier = true;
1288	    }
1289	  else
1290	    {
1291	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1292		/* A call may read and modify global register variables.  */
1293		if (global_regs[i])
1294		  {
1295		    SET_REGNO_REG_SET (reg_pending_sets, i);
1296		    SET_REGNO_REG_SET (reg_pending_uses, i);
1297		  }
1298		/* Other call-clobbered hard regs may be clobbered.
1299		   Since we only have a choice between 'might be clobbered'
1300		   and 'definitely not clobbered', we must include all
1301		   partly call-clobbered registers here.  */
1302		else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
1303			 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1304		  SET_REGNO_REG_SET (reg_pending_clobbers, i);
1305		/* We don't know which fixed registers the called function
1306		   might use, but the stack pointer certainly is one of them,
1307		   so be conservative and treat them all as used.  */
1308		else if (fixed_regs[i])
1309		  SET_REGNO_REG_SET (reg_pending_uses, i);
1310		/* The frame pointer is normally not used by the function
1311		   itself, but by the debugger.  */
1312		/* ??? MIPS o32 is an exception.  It uses the frame pointer
1313		   in the macro expansion of jal but does not represent this
1314		   fact in the call_insn rtl.  */
1315		else if (i == FRAME_POINTER_REGNUM
1316			 || (i == HARD_FRAME_POINTER_REGNUM
1317			     && (! reload_completed || frame_pointer_needed)))
1318		  SET_REGNO_REG_SET (reg_pending_uses, i);
1319	    }
1320
1321	  /* For each insn which shouldn't cross a call, add a dependence
1322	     between that insn and this call insn.  */
1323	  add_dependence_list_and_free (insn, &deps->sched_before_next_call,
1324					REG_DEP_ANTI);
1325
1326	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
1327	  loop_notes = 0;
1328
1329	  /* In the absence of interprocedural alias analysis, we must flush
1330	     all pending reads and writes, and start new dependencies starting
1331	     from here.  But only flush writes for constant calls (which may
1332	     be passed a pointer to something we haven't written yet).  */
1333	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
1334
1335	  /* Remember the last function call for limiting lifetimes.  */
1336	  free_INSN_LIST_list (&deps->last_function_call);
1337	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
1338
1339	  /* Before reload, begin a post-call group, so as to keep the
1340	     lifetimes of hard registers correct.  */
1341	  if (! reload_completed)
1342	    deps->in_post_call_group_p = true;
1343	}
1344
1345      /* See comments on reemit_notes as to why we do this.
1346	 ??? Actually, the reemit_notes just say what is done, not why.  */
1347
1348      if (GET_CODE (insn) == NOTE
1349	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
1350		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
1351		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1352		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
1353	{
1354	  rtx rtx_region;
1355
1356	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
1357	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
1358	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
1359	  else
1360	    rtx_region = GEN_INT (0);
1361
1362	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1363					rtx_region,
1364					loop_notes);
1365	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
1366					GEN_INT (NOTE_LINE_NUMBER (insn)),
1367					loop_notes);
1368	  CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
1369	}
1370
1371      if (current_sched_info->use_cselib)
1372	cselib_process_insn (insn);
1373
1374      /* Now that we have completed handling INSN, check and see if it is
1375	 a CLOBBER beginning a libcall block.   If it is, record the
1376	 end of the libcall sequence.
1377
1378	 We want to schedule libcall blocks as a unit before reload.  While
1379	 this restricts scheduling, it preserves the meaning of a libcall
1380	 block.
1381
1382	 As a side effect, we may get better code due to decreased register
1383	 pressure as well as less chance of a foreign insn appearing in
1384	 a libcall block.  */
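      /* Concretely, the checks below look for a block of the form

           (clobber r0)   ;; carries a REG_LIBCALL note pointing at the tail
           ...
           (set r0 r0)    ;; tail: a nop move carrying REG_RETVAL and
                          ;; REG_EQUAL notes

         and record the tail insn so that the whole block stays together.  */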
1385      if (!reload_completed
1386	  /* Note we may have nested libcall sequences.  We only care about
1387	     the outermost libcall sequence.  */
1388	  && deps->libcall_block_tail_insn == 0
1389	  /* The sequence must start with a clobber of a register.  */
1390	  && GET_CODE (insn) == INSN
1391	  && GET_CODE (PATTERN (insn)) == CLOBBER
1392	  && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
1394	  /* The CLOBBER must also have a REG_LIBCALL note attached.  */
1395	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
1396	  && (end_seq = XEXP (link, 0)) != 0
1397	  /* The insn referenced by the REG_LIBCALL note must be a
1398	     simple nop copy with the same destination as the register
1399	     mentioned in the clobber.  */
1400	  && (set = single_set (end_seq)) != 0
1401	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
1402	  /* And finally the insn referenced by the REG_LIBCALL must
1403	     also contain a REG_EQUAL note and a REG_RETVAL note.  */
1404	  && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
1405	  && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
1406	deps->libcall_block_tail_insn = XEXP (link, 0);
1407
1408      /* If we have reached the end of a libcall block, then close the
1409	 block.  */
1410      if (deps->libcall_block_tail_insn == insn)
1411	deps->libcall_block_tail_insn = 0;
1412
1413      if (insn == tail)
1414	{
1415	  if (current_sched_info->use_cselib)
1416	    cselib_finish ();
1417	  return;
1418	}
1419    }
1420  abort ();
1421}
1422
1423/* Examine insns in the range [ HEAD, TAIL ] and use the backward
1424   dependences from LOG_LINKS to build forward dependences in
1425   INSN_DEPEND.  */
1426
1427void
1428compute_forward_dependences (head, tail)
1429     rtx head, tail;
1430{
1431  rtx insn, link;
1432  rtx next_tail;
1433  enum reg_note dep_type;
1434
1435  next_tail = NEXT_INSN (tail);
1436  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1437    {
1438      if (! INSN_P (insn))
1439	continue;
1440
1441      insn = group_leader (insn);
1442
1443      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1444	{
1445	  rtx x = group_leader (XEXP (link, 0));
1446	  rtx new_link;
1447
1448	  if (x != XEXP (link, 0))
1449	    continue;
1450
1451#ifdef ENABLE_CHECKING
1452	  /* If add_dependence is working properly there should never
1453	     be notes, deleted insns or duplicates in the backward
1454	     links.  Thus we need not check for them here.
1455
1456	     However, if we have enabled checking we might as well go
1457	     ahead and verify that add_dependence worked properly.  */
1458	  if (GET_CODE (x) == NOTE
1459	      || INSN_DELETED_P (x)
1460	      || (forward_dependency_cache != NULL
1461		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
1462			       INSN_LUID (insn)))
1463	      || (forward_dependency_cache == NULL
1464		  && find_insn_list (insn, INSN_DEPEND (x))))
1465	    abort ();
1466	  if (forward_dependency_cache != NULL)
1467	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
1468		     INSN_LUID (insn));
1469#endif
1470
1471	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));
1472
1473	  dep_type = REG_NOTE_KIND (link);
1474	  PUT_REG_NOTE_KIND (new_link, dep_type);
1475
1476	  INSN_DEPEND (x) = new_link;
1477	  INSN_DEP_COUNT (insn) += 1;
1478	}
1479    }
1480}
1481
1482/* Initialize the variables in DEPS used for region data dependence
1483   analysis.  */
1484
1485void
1486init_deps (deps)
1487     struct deps *deps;
1488{
1489  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
1490
1491  deps->max_reg = max_reg;
1492  deps->reg_last = (struct deps_reg *)
1493    xcalloc (max_reg, sizeof (struct deps_reg));
1494  INIT_REG_SET (&deps->reg_last_in_use);
1495  INIT_REG_SET (&deps->reg_conditional_sets);
1496
1497  deps->pending_read_insns = 0;
1498  deps->pending_read_mems = 0;
1499  deps->pending_write_insns = 0;
1500  deps->pending_write_mems = 0;
1501  deps->pending_lists_length = 0;
1502  deps->pending_flush_length = 0;
1503  deps->last_pending_memory_flush = 0;
1504  deps->last_function_call = 0;
1505  deps->sched_before_next_call = 0;
1506  deps->in_post_call_group_p = false;
1507  deps->libcall_block_tail_insn = 0;
1508}
1509
1510/* Free insn lists found in DEPS.  */
1511
1512void
1513free_deps (deps)
1514     struct deps *deps;
1515{
1516  int i;
1517
1518  free_INSN_LIST_list (&deps->pending_read_insns);
1519  free_EXPR_LIST_list (&deps->pending_read_mems);
1520  free_INSN_LIST_list (&deps->pending_write_insns);
1521  free_EXPR_LIST_list (&deps->pending_write_mems);
1522  free_INSN_LIST_list (&deps->last_pending_memory_flush);
1523
1524  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
1525     times.  For a test case with 42000 regs and 8000 small basic blocks,
1526     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
1527  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
1528    {
1529      struct deps_reg *reg_last = &deps->reg_last[i];
1530      if (reg_last->uses)
1531	free_INSN_LIST_list (&reg_last->uses);
1532      if (reg_last->sets)
1533	free_INSN_LIST_list (&reg_last->sets);
1534      if (reg_last->clobbers)
1535	free_INSN_LIST_list (&reg_last->clobbers);
1536    });
1537  CLEAR_REG_SET (&deps->reg_last_in_use);
1538  CLEAR_REG_SET (&deps->reg_conditional_sets);
1539
1540  free (deps->reg_last);
1541}
1542
1543/* If it is profitable to use them, initialize caches for tracking
1544   dependency information.  LUID is the number of insns to be scheduled;
1545   it is used in the estimate of profitability.  */
1546
1547void
1548init_dependency_caches (luid)
1549     int luid;
1550{
1551  /* ?!? We could save some memory by computing a per-region luid mapping
1552     which could reduce both the number of vectors in the cache and the size
1553     of each vector.  Instead we just avoid the cache entirely unless the
1554     average number of instructions in a basic block is very high.  See
1555     the comment before the declaration of true_dependency_cache for
1556     what we consider "very high".  */
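  /* For example, with 10 basic blocks the caches are only allocated once
     the region contains more than 10 * 100 * 5 = 5000 insns.  */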
1557  if (luid / n_basic_blocks > 100 * 5)
1558    {
1559      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
1560      sbitmap_vector_zero (true_dependency_cache, luid);
1561      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
1562      sbitmap_vector_zero (anti_dependency_cache, luid);
1563      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
1564      sbitmap_vector_zero (output_dependency_cache, luid);
1565#ifdef ENABLE_CHECKING
1566      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
1567      sbitmap_vector_zero (forward_dependency_cache, luid);
1568#endif
1569    }
1570}
1571
1572/* Free the caches allocated in init_dependency_caches.  */
1573
1574void
1575free_dependency_caches ()
1576{
1577  if (true_dependency_cache)
1578    {
1579      sbitmap_vector_free (true_dependency_cache);
1580      true_dependency_cache = NULL;
1581      sbitmap_vector_free (anti_dependency_cache);
1582      anti_dependency_cache = NULL;
1583      sbitmap_vector_free (output_dependency_cache);
1584      output_dependency_cache = NULL;
1585#ifdef ENABLE_CHECKING
1586      sbitmap_vector_free (forward_dependency_cache);
1587      forward_dependency_cache = NULL;
1588#endif
1589    }
1590}
1591
1592/* Initialize some global variables needed by the dependency analysis
1593   code.  */
1594
1595void
1596init_deps_global ()
1597{
1598  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
1599  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
1600  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
1601  reg_pending_barrier = false;
1602}
1603
1604/* Free everything used by the dependency analysis code.  */
1605
1606void
1607finish_deps_global ()
1608{
1609  FREE_REG_SET (reg_pending_sets);
1610  FREE_REG_SET (reg_pending_clobbers);
1611  FREE_REG_SET (reg_pending_uses);
1612}
1613