/* Alias analysis for GNU C
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "cselib.h"
#include "splay-tree.h"
#include "ggc.h"
#include "langhooks.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
#include "varray.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1,  *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &x1.y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2 (i.e. (*px2).z2)?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store thru a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions but this is too conservative.  The observation is
   that in some cases we can know which (if any) fields are addressed
   and whether those addresses are used in bad ways.  This analysis
   may be language specific.  In C, arbitrary operations may be
   applied to pointers.  However, there is some indication that this
   may be too conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures;
   the data structure used to solve the last two questions is similar
   to the first, but does not contain the fields whose addresses are
   never taken.  For types that do escape the compilation unit, the
   two data structures will contain identical information.  */

/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

     struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:
           struct S
            /   \
           /     \
         |/_     _\|
         int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */

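/* For illustration, with the sets from the diagram above (the set
   numbers are hypothetical; only the conflict relations matter):

     set (struct S) = 3, set (int) = 1, set (double) = 2

     alias_sets_conflict_p (3, 1) == 1   int is a subset of struct S
     alias_sets_conflict_p (3, 2) == 1   double is a subset of struct S
     alias_sets_conflict_p (1, 2) == 0   int and double never conflict
     alias_sets_conflict_p (0, 3) == 1   set zero conflicts with all  */
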
struct alias_set_entry GTY(())
{
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  HOST_WIDE_INT alias_set;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;

  /* Nonzero if this set would have a child of zero: this effectively
     makes this alias set the same as alias set zero.  */
  int has_zero_child;
};
typedef struct alias_set_entry *alias_set_entry;

static int rtx_equal_for_memref_p (rtx, rtx);
static rtx find_symbolic_term (rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, rtx, void *);
static int base_alias_check (rtx, rtx, enum machine_mode,
			     enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (rtx, rtx);
static int insert_subset_children (splay_tree_node, void*);
static tree find_base_decl (tree);
static alias_set_entry get_alias_set_entry (HOST_WIDE_INT);
static rtx fixed_scalar_and_varying_struct_p (rtx, rtx, rtx, rtx,
					      int (*) (rtx, int));
static int aliases_everything_p (rtx);
static bool nonoverlapping_component_refs_p (tree, tree);
static tree decl_for_component_ref (tree);
static rtx adjust_offset_for_component_ref (tree, rtx);
static int nonoverlapping_memrefs_p (rtx, rtx);
static int write_dependence_p (rtx, rtx, int);

static void memory_modified_1 (rtx, rtx, void *);
static void record_alias_subset (HOST_WIDE_INT, HOST_WIDE_INT);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
   different alias sets.  */
#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2)			\
  mems_in_disjoint_alias_sets_p (MEM1, MEM2)

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.  10 is a completely arbitrary choice.  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent certain special values: function arguments and
   the stack, frame, and argument pointers.

   The contents of an ADDRESS are not normally used; the mode of the
   ADDRESS determines whether the ADDRESS is a function argument or some
   other special value.  Pointer equality, not rtx_equal_p, determines whether
   two ADDRESS expressions refer to the same base address.

   The only use of the contents of an ADDRESS is for determining if the
   current function performs nonlocal memory references for the
   purposes of marking the function as a constant function.  */

static GTY(()) varray_type reg_base_value;
static rtx *new_reg_base_value;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced; about 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) varray_type old_reg_base_value;

/* Static hunks of RTL used by the aliasing code; these are initialized
   once per function to avoid unnecessary RTL allocations.  */
static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];

#define REG_BASE_VALUE(X) \
  (reg_base_value && REGNO (X) < VARRAY_SIZE (reg_base_value) \
   ? VARRAY_RTX (reg_base_value, REGNO (X)) : 0)

/* Vector of known invariant relationships between registers.  Set in
   loop unrolling.  Indexed by register number; if nonzero, the value
   is an expression describing this register in terms of another.

   The length of this array is alias_invariant_size.

   Because this array contains only pseudo registers it has no effect
   after reload.  */
static GTY((length("alias_invariant_size"))) rtx *alias_invariant;
static GTY(()) unsigned int alias_invariant_size;

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This array is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY((length("reg_known_value_size"))) rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static GTY(()) unsigned int reg_known_value_size;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static bool *reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;

/* The array used to store the various alias set entries.  */
static GTY ((param_is (struct alias_set_entry))) varray_type alias_sets;

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (HOST_WIDE_INT alias_set)
{
  return (alias_set_entry)VARRAY_GENERIC_PTR (alias_sets, alias_set);
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (rtx mem1, rtx mem2)
{
/* Perform a basic sanity check.  Namely, that there are no alias sets
   if we're not using strict aliasing.  This helps to catch bugs
   whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
   where a MEM is allocated in some way other than by the use of
   gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
   use alias sets to indicate that spilled registers cannot alias each
   other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2)
{
  alias_set_entry ase;

  /* If we have no alias set information for one of the operands, we
     have to assume it can alias anything.  */
  if (set1 == 0 || set2 == 0
      /* If the two alias sets are the same, they may alias.  */
      || set1 == set2)
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot alias.  */
  return 0;
}

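/* A minimal usage sketch (MEM1 and MEM2 are hypothetical MEMs built
   elsewhere):

     if (! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
				  MEM_ALIAS_SET (mem2)))
       ...  the two references cannot overlap  ...

   This is exactly what mems_in_disjoint_alias_sets_p does, modulo
   the sanity check at its head.  */
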
/* Return 1 if the two specified alias sets might conflict, or if any subtype
   of these alias sets might conflict.  */

int
alias_sets_might_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}


/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  HOST_WIDE_INT set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* Otherwise they conflict if they have no alias set or the same one.
     We can't simply use alias_sets_conflict_p here, because we must
     make sure that every subtype of t1 will conflict with every
     subtype of t2 for which a pair of subobjects of these respective
     subtypes overlaps on the stack.  */
  return set1 == 0 || set2 == 0 || set1 == set2;
}

/* T is an expression with pointer type.  Find the DECL on which this
   expression is based.  (For example, in `a[i]' this would be `a'.)
   If there is no such DECL, or a unique decl cannot be determined,
   NULL_TREE is returned.  */

static tree
find_base_decl (tree t)
{
  tree d0, d1;

  if (t == 0 || t == error_mark_node || ! POINTER_TYPE_P (TREE_TYPE (t)))
    return 0;

  /* If this is a declaration, return it.  If T is based on a restrict
     qualified decl, return that decl.  */
  if (DECL_P (t))
    {
      if (TREE_CODE (t) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (t))
	t = DECL_GET_RESTRICT_BASE (t);
      return t;
    }

  /* Handle general expressions.  It would be nice to deal with
     COMPONENT_REFs here.  If we could tell that `a' and `b' were the
     same, then `a->f' and `b->f' are also the same.  */
  switch (TREE_CODE_CLASS (TREE_CODE (t)))
    {
    case tcc_unary:
      return find_base_decl (TREE_OPERAND (t, 0));

    case tcc_binary:
      /* Return the decl if both operands agree on it, or if it is
	 found in exactly one operand; return 0 otherwise.  */
      d0 = find_base_decl (TREE_OPERAND (t, 0));
      d1 = find_base_decl (TREE_OPERAND (t, 1));
      if (d0 == d1)
	return d0;
      else if (d0 == 0)
	return d1;
      else if (d1 == 0)
	return d0;
      else
	return 0;

    default:
      return 0;
    }
}

/* Return true if all nested component references handled by
   get_inner_reference in T are such that we should use the alias set
   provided by the object at the heart of T.

   This is true for non-addressable components (which don't have their
   own alias set), as well as components of objects in alias set zero.
   This latter point is a special case wherein we wish to override the
   alias set used by the component, but we don't have per-FIELD_DECL
   assignable alias sets.  */

bool
component_uses_parent_alias_set (tree t)
{
  while (1)
    {
      /* If we're at the end, it vacuously uses its own alias set.  */
      if (!handled_component_p (t))
	return false;

      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
	    return true;
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
	    return true;
	  break;

	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  break;

	default:
	  /* Bitfields and casts are never addressable.  */
	  return true;
	}

      t = TREE_OPERAND (t, 0);
      if (get_alias_set (TREE_TYPE (t)) == 0)
	return true;
    }
}

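/* For illustration (schematic; the exact trees depend on the front
   end): given

     struct S { int i : 3; } s;

   the reference `s.i' names a bitfield, so
   component_uses_parent_alias_set returns true and the access is
   given the alias set of `struct S' rather than that of `int'.  */
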
/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

HOST_WIDE_INT
get_alias_set (tree t)
{
  HOST_WIDE_INT set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      tree inner = t;

      /* Remove any nops, then give the language a chance to do
	 something with this tree before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* First see if the actual object referenced is an INDIRECT_REF from a
	 restrict-qualified pointer or a "void *".  */
      while (handled_component_p (inner))
	{
	  inner = TREE_OPERAND (inner, 0);
	  STRIP_NOPS (inner);
	}

      /* Check for accesses through restrict-qualified pointers.  */
      if (INDIRECT_REF_P (inner))
	{
	  tree decl = find_base_decl (TREE_OPERAND (inner, 0));

	  if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl))
	    {
	      /* If we haven't computed the actual alias set, do it now.  */
	      if (DECL_POINTER_ALIAS_SET (decl) == -2)
		{
		  tree pointed_to_type = TREE_TYPE (TREE_TYPE (decl));

		  /* No two restricted pointers can point at the same thing.
		     However, a restricted pointer can point at the same thing
		     as an unrestricted pointer, if that unrestricted pointer
		     is based on the restricted pointer.  So, we make the
		     alias set for the restricted pointer a subset of the
		     alias set for the type pointed to by the type of the
		     decl.  */
		  HOST_WIDE_INT pointed_to_alias_set
		    = get_alias_set (pointed_to_type);

		  if (pointed_to_alias_set == 0)
		    /* It's not legal to make a subset of alias set zero.  */
		    DECL_POINTER_ALIAS_SET (decl) = 0;
		  else if (AGGREGATE_TYPE_P (pointed_to_type))
		    /* For an aggregate, we must treat the restricted
		       pointer the same as an ordinary pointer.  If we
		       were to make the type pointed to by the
		       restricted pointer a subset of the pointed-to
		       type, then we would believe that other subsets
		       of the pointed-to type (such as fields of that
		       type) do not conflict with the type pointed to
		       by the restricted pointer.  */
		    DECL_POINTER_ALIAS_SET (decl)
		      = pointed_to_alias_set;
		  else
		    {
		      DECL_POINTER_ALIAS_SET (decl) = new_alias_set ();
		      record_alias_subset (pointed_to_alias_set,
					   DECL_POINTER_ALIAS_SET (decl));
		    }
		}

	      /* We use the alias set indicated in the declaration.  */
	      return DECL_POINTER_ALIAS_SET (decl);
	    }

	  /* If we have an INDIRECT_REF via a void pointer, we don't
	     know anything about what that might alias.  Likewise if the
	     pointer is marked that way.  */
	  else if (TREE_CODE (TREE_TYPE (inner)) == VOID_TYPE
		   || (TYPE_REF_CAN_ALIAS_ALL
		       (TREE_TYPE (TREE_OPERAND (inner, 0)))))
	    return 0;
	}

      /* Otherwise, pick up the outermost object that we could have a pointer
	 to, processing conversions as above.  */
      while (component_uses_parent_alias_set (t))
	{
	  t = TREE_OPERAND (t, 0);
	  STRIP_NOPS (t);
	}

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  If this is a type with a known alias set, return it.  */
  t = TYPE_MAIN_VARIANT (t);
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  else
    /* Otherwise make a new alias set for this type.  */
    set = new_alias_set ();

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type, we must record any component aliasing
     information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}

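/* For illustration, get_alias_set may be handed an expression rather
   than a type.  For `*p' with

     int *restrict p;

   it returns the per-decl alias set recorded on `p' (a fresh subset
   of the set for `int'), rather than the set for `int' itself.  This
   sketch glosses over the exact trees involved.  */
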
/* Return a brand-new alias set.  */

static GTY(()) HOST_WIDE_INT last_alias_set;

HOST_WIDE_INT
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (!alias_sets)
	VARRAY_GENERIC_PTR_INIT (alias_sets, 10, "alias sets");
      else
	VARRAY_GROW (alias_sets, last_alias_set + 2);
      return ++last_alias_set;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a `double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

static void
record_alias_subset (HOST_WIDE_INT superset, HOST_WIDE_INT subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
	 attach the SUBSET.  */
      superset_entry = ggc_alloc (sizeof (struct alias_set_entry));
      superset_entry->alias_set = superset;
      superset_entry->children
	= splay_tree_new_ggc (splay_tree_compare_ints);
      superset_entry->has_zero_child = 0;
      VARRAY_GENERIC_PTR (alias_sets, superset) = superset_entry;
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
	 (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
	{
	  if (subset_entry->has_zero_child)
	    superset_entry->has_zero_child = 1;

	  splay_tree_foreach (subset_entry->children, insert_subset_children,
			      superset_entry->children);
	}

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
			 (splay_tree_key) subset, 0);
    }
}

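/* Continuing the example from the head of the file: for

     struct T { struct S s; float f; }

   record_component_aliases makes the calls (set numbers again
   hypothetical)

     record_alias_subset (set (T), set (struct S));
     record_alias_subset (set (T), set (float));

   and the first call also copies struct S's children (int and
   double) into T's child tree via insert_subset_children.  */
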
/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are marked addressable.  For array types, we always
   record the component types, so the front end should not call this
   function if the individual components aren't addressable.  */

void
record_component_aliases (tree type)
{
  HOST_WIDE_INT superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      if (! TYPE_NONALIASED_COMPONENT (type))
	record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
	{
	  int i;
	  tree binfo, base_binfo;

	  for (binfo = TYPE_BINFO (type), i = 0;
	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
	    record_alias_subset (superset,
				 get_alias_set (BINFO_TYPE (base_binfo)));
	}
      for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL && ! DECL_NONADDRESSABLE_P (field))
	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

    default:
      break;
    }
}

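/* For illustration, the ARRAY_TYPE case above means that for

     double a[10];

   the set for `double' is recorded as a subset of the set for the
   array type, so a store through a `double *' may alias `a' (unless
   the component is marked non-aliased).  */
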
/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) HOST_WIDE_INT varargs_set = -1;

HOST_WIDE_INT
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) HOST_WIDE_INT frame_set = -1;

HOST_WIDE_INT
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
	 values which may be lost later.  Returning an ADDRESS
	 expression here allows optimization based on argument values
	 even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
	return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
	 for non-fixed hard regs since it can result in a circular
	 dependency chain for registers which have values at function entry.

	 The test above is not sufficient because the scheduler may move
	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
	  && regno < VARRAY_SIZE (reg_base_value))
	{
	  /* If we're inside init_alias_analysis, use new_reg_base_value
	     to reduce the number of relaxation iterations.  */
	  if (new_reg_base_value && new_reg_base_value[regno]
	      && REG_N_SETS (regno) == 1)
	    return new_reg_base_value[regno];

	  if (VARRAY_RTX (reg_base_value, regno))
	    return VARRAY_RTX (reg_base_value, regno);
	}

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
	 copying-arguments block; it is too hard to track changes
	 otherwise.  */
      if (copying_arguments
	  && (XEXP (src, 0) == arg_pointer_rtx
	      || (GET_CODE (XEXP (src, 0)) == PLUS
		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
	return gen_rtx_ADDRESS (VOIDmode, src);
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
	break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

	/* If either operand is a REG that is a known pointer, then it
	   is the base.  */
	if (REG_P (src_0) && REG_POINTER (src_0))
	  return find_base_value (src_0);
	if (REG_P (src_1) && REG_POINTER (src_1))
	  return find_base_value (src_1);

	/* If either operand is a REG, then see if we already have
	   a known value for it.  */
	if (REG_P (src_0))
	  {
	    temp = find_base_value (src_0);
	    if (temp != 0)
	      src_0 = temp;
	  }

	if (REG_P (src_1))
	  {
	    temp = find_base_value (src_1);
	    if (temp != 0)
	      src_1 = temp;
	  }

	/* If either base is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (src_0 != 0
	    && (GET_CODE (src_0) == SYMBOL_REF
		|| GET_CODE (src_0) == LABEL_REF
		|| (GET_CODE (src_0) == ADDRESS
		    && GET_MODE (src_0) != VOIDmode)))
	  return src_0;

	if (src_1 != 0
	    && (GET_CODE (src_1) == SYMBOL_REF
		|| GET_CODE (src_1) == LABEL_REF
		|| (GET_CODE (src_1) == ADDRESS
		    && GET_MODE (src_1) != VOIDmode)))
	  return src_1;

	/* Guess which operand is the base address:
	   if either operand is a symbol, then it is the base.  If
	   either operand is a CONST_INT, then the other is the base.  */
	if (GET_CODE (src_1) == CONST_INT || CONSTANT_P (src_0))
	  return find_base_value (src_0);
	else if (GET_CODE (src_0) == CONST_INT || CONSTANT_P (src_1))
	  return find_base_value (src_1);

	return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant, set the base
	 address to the first operand.  */
      if (GET_CODE (XEXP (src, 1)) == CONST_INT && INTVAL (XEXP (src, 1)) != 0)
	return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
	break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* used for NT/Alpha pointers */
      {
	rtx temp = find_base_value (XEXP (src, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    default:
      break;
    }

  return 0;
}

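/* For illustration, some hypothetical inputs and the values
   find_base_value would return for them:

     (symbol_ref "x")                     => (symbol_ref "x")
     (plus (reg 100) (const_int 8))       => the base value recorded
					     for (reg 100), if any
     (and (reg 100) (const_int -8))       => likewise
     (lo_sum (reg 100) (symbol_ref "x"))  => (symbol_ref "x")  */
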
/* Called from init_alias_analysis indirectly through note_stores.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static char *reg_seen;

/* Addresses which are known not to alias anything else are identified
   by a unique integer.  */
static int unique_id;

static void
record_set (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_assert (regno < VARRAY_SIZE (reg_base_value));

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
	{
	  reg_seen[regno + n] = 1;
	  new_reg_base_value[regno + n] = 0;
	}
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
	 unset register from acquiring a base address (i.e. reg_seen is not
	 set).  */
      if (GET_CODE (set) == CLOBBER)
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      src = SET_SRC (set);
    }
  else
    {
      if (reg_seen[regno])
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      reg_seen[regno] = 1;
      new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
						   GEN_INT (unique_id++));
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

	(1) The register might be assigned an entirely new value
	    that has the same base term as the original set.

	(2) The set might be a simple self-modification that
	    cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

         extern int x, y;  int *p = &x; p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
	  new_reg_base_value[regno] = 0;
	break;
      case PLUS:
	/* If the value we add in the PLUS is also a valid base value,
	   this might be the actual base value, and the original value
	   an index.  */
	{
	  rtx other = NULL_RTX;

	  if (XEXP (src, 0) == dest)
	    other = XEXP (src, 1);
	  else if (XEXP (src, 1) == dest)
	    other = XEXP (src, 0);

	  if (! other || find_base_value (other))
	    new_reg_base_value[regno] = 0;
	  break;
	}
      case AND:
	if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT)
	  new_reg_base_value[regno] = 0;
	break;
      default:
	new_reg_base_value[regno] = 0;
	break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
	   && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  reg_seen[regno] = 1;
}

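/* For illustration (a sketch; pseudo 100 is hypothetical): given

     (set (reg 100) (symbol_ref "x"))
     (set (reg 100) (plus (reg 100) (const_int 4)))

   the first set records (symbol_ref "x") as reg 100's base value,
   and the second, a self-modification adding a non-base constant,
   leaves it intact.  An unrelated set such as
   (set (reg 100) (reg 200)) would instead invalidate the base.  */
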
/* Called from loop optimization when a new pseudo-register is
   created.  It indicates that REGNO is being set to VAL.  If
   INVARIANT is true, then this value also describes an invariant
   relationship which can be used to deduce that two registers with
   unknown values are different.  */

void
record_base_value (unsigned int regno, rtx val, int invariant)
{
  if (invariant && alias_invariant && regno < alias_invariant_size)
    alias_invariant[regno] = val;

  if (regno >= VARRAY_SIZE (reg_base_value))
    VARRAY_GROW (reg_base_value, max_reg_num ());

  if (REG_P (val))
    {
      VARRAY_RTX (reg_base_value, regno)
	 = REG_BASE_VALUE (val);
      return;
    }
  VARRAY_RTX (reg_base_value, regno)
     = find_base_value (val);
}

/* Clear alias info for a register.  This is used if an RTL transformation
   changes the value of a register.  It is used in flow by the
   AUTO_INC_DEC optimizations.  We don't need to clear reg_base_value,
   since flow only changes the offset.  */

void
clear_reg_alias_info (rtx reg)
{
  unsigned int regno = REGNO (reg);

  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	{
	  reg_known_value[regno] = reg;
	  reg_known_equiv_p[regno] = false;
	}
    }
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_value[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_value[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_equiv_p[regno];
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_equiv_p[regno] = val;
    }
}


/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
	return x;
      if (t)
	return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant (x0, INTVAL (x1));
	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
	}
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note that we want to leave the original
     MEM alone, but we do need to return the canonicalized MEM with
     all the flags at their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}

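/* For illustration, with reg_known_value recording
   (symbol_ref "x") for pseudo 100:

     canon_rtx ((plus (reg 100) (const_int 4)))
       => (const (plus (symbol_ref "x") (const_int 4)))

   via plus_constant.  Registers with no known value come back
   unchanged.  */
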
/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
      /* There's no need to compare the contents of CONST_DOUBLEs or
	 CONST_INTs because pointer equality is a good enough
	 comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
	      || (rtx_equal_for_memref_p (xop0, yop1)
		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				      canon_rtx (XEXP (y, 0)))
	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
					 canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
					canon_rtx (XVECEXP (y, i, j))) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
				      canon_rtx (XEXP (y, i))) == 0)
	    return 0;
	  break;

	  /* This can happen for asm operands.  */
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	/* This can happen for an asm which clobbers memory.  */
	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (rtx x)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (OBJECT_P (x))
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
	{
	  t = find_symbolic_term (XEXP (x, i));
	  if (t != 0)
	    return t;
	}
      else if (fmt[i] == 'E')
	break;
    }
  return 0;
}

/* Find the base term of the address X, or return 0 if it cannot
   be determined.  */

rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
	return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
      {
	rtx temp = find_base_term (XEXP (x, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      if (!val)
	return 0;
      for (l = val->locs; l; l = l->next)
	if ((x = find_base_term (l->loc)) != 0)
	  return x;
      return 0;

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
	return 0;
      /* Fall through.  */
    case LO_SUM:
    case PLUS:
    case MINUS:
      {
	rtx tmp1 = XEXP (x, 0);
	rtx tmp2 = XEXP (x, 1);

	/* This is a little bit tricky since we have to determine which of
	   the two operands represents the real base address.  Otherwise this
	   routine may return the index register instead of the base register.

	   That may cause us to believe no aliasing was possible, when in
	   fact aliasing is possible.

	   We use a few simple tests to guess the base register.  Additional
	   tests can certainly be added.  For example, if one of the operands
	   is a shift or multiply, then it must be the index register and the
	   other operand is the base register.  */

	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
	  return find_base_term (tmp2);

	/* If either operand is known to be a pointer, then use it
	   to determine the base term.  */
	if (REG_P (tmp1) && REG_POINTER (tmp1))
	  return find_base_term (tmp1);

	if (REG_P (tmp2) && REG_POINTER (tmp2))
	  return find_base_term (tmp2);

	/* Neither operand was known to be a pointer.  Go ahead and find the
	   base term for both operands.  */
	tmp1 = find_base_term (tmp1);
	tmp2 = find_base_term (tmp2);

	/* If either base term is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (tmp1 != 0
	    && (GET_CODE (tmp1) == SYMBOL_REF
		|| GET_CODE (tmp1) == LABEL_REF
		|| (GET_CODE (tmp1) == ADDRESS
		    && GET_MODE (tmp1) != VOIDmode)))
	  return tmp1;

	if (tmp2 != 0
	    && (GET_CODE (tmp2) == SYMBOL_REF
		|| GET_CODE (tmp2) == LABEL_REF
		|| (GET_CODE (tmp2) == ADDRESS
		    && GET_MODE (tmp2) != VOIDmode)))
	  return tmp2;

	/* We could not determine which of the two operands was the
	   base register and which was the index.  So we can determine
	   nothing from the base alias check.  */
	return 0;
      }

    case AND:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) != 0)
	return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}

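/* For illustration (assuming reg 100 has an ADDRESS base value
   recorded, e.g. for the stack):

     (plus (reg 100) (const_int 8))                  => that ADDRESS
     (const (plus (symbol_ref "x") (const_int 4)))   => (symbol_ref "x")
     (reg 200) with no recorded base value           => 0  */
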
/* Return 0 if the addresses X and Y are known to point to different
   objects, 1 if they might be pointers to the same object.  */

static int
base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
		  enum machine_mode y_mode)
{
  rtx x_base = find_base_term (x);
  rtx y_base = find_base_term (y);

  /* If the address itself has no known base see if a known equivalent
     value has one.  If either address still has no known base, nothing
     is known about aliasing.  */
  if (x_base == 0)
    {
      rtx x_c;

      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
	return 1;

      x_base = find_base_term (x_c);
      if (x_base == 0)
	return 1;
    }

  if (y_base == 0)
    {
      rtx y_c;
      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
	return 1;

      y_base = find_base_term (y_c);
      if (y_base == 0)
	return 1;
    }

  /* If the base addresses are equal nothing is known about aliasing.  */
  if (rtx_equal_p (x_base, y_base))
    return 1;

  /* The base addresses of the read and write are different expressions.
     If they are both symbols and they are not accessed via AND, there is
     no conflict.  We can bring knowledge of object alignment into play
     here.  For example, on the Alpha, "char a, b;" can alias one another,
     though "char a; long b;" cannot.  */
  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
    {
      if (GET_CODE (x) == AND && GET_CODE (y) == AND)
	return 1;
      if (GET_CODE (x) == AND
	  && (GET_CODE (XEXP (x, 1)) != CONST_INT
	      || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
	return 1;
      if (GET_CODE (y) == AND
	  && (GET_CODE (XEXP (y, 1)) != CONST_INT
	      || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
	return 1;
      /* Differing symbols never alias.  */
      return 0;
    }

  /* If one address is a stack reference there can be no alias:
     stack references using different base registers do not alias,
     a stack reference cannot alias a parameter, and a stack reference
     cannot alias a global.  */
  if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
      || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
    return 0;

  if (! flag_argument_noalias)
    return 1;

  if (flag_argument_noalias > 1)
    return 0;

  /* Weak noalias assertion (arguments are distinct, but may match globals).  */
  return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
}

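/* For illustration: with X based on (symbol_ref "a") and Y based on
   (symbol_ref "b"), the base terms are distinct SYMBOL_REFs and
   neither access goes through an AND, so base_alias_check returns 0
   (no alias).  A stack reference (an ADDRESS in Pmode) likewise
   cannot alias a symbol.  */
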
/* Convert the address X into something we can use.  This is done by returning
   it unchanged unless it is a value; in the latter case we call cselib to get
   a more useful rtx.  */

rtx
get_addr (rtx x)
{
  cselib_val *v;
  struct elt_loc_list *l;

  if (GET_CODE (x) != VALUE)
    return x;
  v = CSELIB_VAL_PTR (x);
  if (v)
    {
      for (l = v->locs; l; l = l->next)
	if (CONSTANT_P (l->loc))
	  return l->loc;
      for (l = v->locs; l; l = l->next)
	if (!REG_P (l->loc) && !MEM_P (l->loc))
	  return l->loc;
      if (v->locs)
	return v->locs->loc;
    }
  return x;
}

/* Return the address of the (N_REFS + 1)th memory reference to ADDR
   where SIZE is the size in bytes of the memory reference.  If ADDR
   is not modified by the memory reference then ADDR is returned.  */

static rtx
addr_side_effect_eval (rtx addr, int size, int n_refs)
{
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case PRE_INC:
      offset = (n_refs + 1) * size;
      break;
    case PRE_DEC:
      offset = -(n_refs + 1) * size;
      break;
    case POST_INC:
      offset = n_refs * size;
      break;
    case POST_DEC:
      offset = -n_refs * size;
      break;

    default:
      return addr;
    }

  if (offset)
    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
			 GEN_INT (offset));
  else
    addr = XEXP (addr, 0);
  addr = canon_rtx (addr);

  return addr;
}

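/* For illustration, with ADDR = (post_inc (reg 100)) and SIZE = 4:

     n_refs == 0  =>  (reg 100)                        the first reference
     n_refs == 1  =>  (plus (reg 100) (const_int 4))   the second

   (both after canonicalization).  */
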
/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.
   Expect that canon_rtx has already been called for X and Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   If XSIZE or YSIZE is negative, we may access memory outside the object
   being referenced as a side effect.  This can happen when using AND to
   align memory references, as is done on the Alpha.

   It would be nice to notice that varying addresses cannot conflict
   with the frame pointer if no local variables had their addresses
   taken, but that's too hard for now.  */

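/* For illustration of the offset accumulator: two 4-byte references,
   one at a symbol and one at the same symbol plus 4, do not conflict:

     memrefs_conflict_p (4, (symbol_ref "x"),
			 4, (plus (symbol_ref "x") (const_int 4)), 0)

   recurses on the PLUS with C = 4, finds the addresses equal, and
   since xsize (4) is not greater than C it returns 0.  */
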
static int
memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
{
  if (GET_CODE (x) == VALUE)
    x = get_addr (x);
  if (GET_CODE (y) == VALUE)
    y = get_addr (y);
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = addr_side_effect_eval (x, xsize, 0);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = addr_side_effect_eval (y, ysize, 0);

  if (rtx_equal_for_memref_p (x, y))
    {
      if (xsize <= 0 || ysize <= 0)
	return 1;
      if (c >= 0 && xsize > c)
	return 1;
      if (c < 0 && ysize + c > 0)
	return 1;
      return 0;
    }

  /* This code used to check for conflicts involving stack references and
     globals but the base address alias code now handles these cases.  */

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
	{
	  /* The fact that Y is canonicalized means that this
	     PLUS rtx is canonicalized.  */
	  rtx y0 = XEXP (y, 0);
	  rtx y1 = XEXP (y, 1);

	  if (rtx_equal_for_memref_p (x1, y1))
	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	  if (rtx_equal_for_memref_p (x0, y0))
	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
	  if (GET_CODE (x1) == CONST_INT)
	    {
	      if (GET_CODE (y1) == CONST_INT)
		return memrefs_conflict_p (xsize, x0, ysize, y0,
					   c - INTVAL (x1) + INTVAL (y1));
	      else
		return memrefs_conflict_p (xsize, x0, ysize, y,
					   c - INTVAL (x1));
	    }
	  else if (GET_CODE (y1) == CONST_INT)
	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

	  return 1;
	}
      else if (GET_CODE (x1) == CONST_INT)
	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
	return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
	{
	  /* Handle cases where we expect the second operands to be the
	     same, and check only whether the first operand would conflict
	     or not.  */
	  rtx x0, y0;
	  rtx x1 = canon_rtx (XEXP (x, 1));
	  rtx y1 = canon_rtx (XEXP (y, 1));
	  if (! rtx_equal_for_memref_p (x1, y1))
	    return 1;
	  x0 = canon_rtx (XEXP (x, 0));
	  y0 = canon_rtx (XEXP (y, 0));
	  if (rtx_equal_for_memref_p (x0, y0))
	    return (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

	  /* Can't properly adjust our sizes.  */
	  if (GET_CODE (x1) != CONST_INT)
	    return 1;
	  xsize /= INTVAL (x1);
	  ysize /= INTVAL (x1);
	  c /= INTVAL (x1);
	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	}

      case REG:
	/* Are these registers known not to be equal?  */
	if (alias_invariant)
	  {
	    unsigned int r_x = REGNO (x), r_y = REGNO (y);
	    rtx i_x, i_y;	/* invariant relationships of X and Y */

	    i_x = r_x >= alias_invariant_size ? 0 : alias_invariant[r_x];
	    i_y = r_y >= alias_invariant_size ? 0 : alias_invariant[r_y];

	    if (i_x == 0 && i_y == 0)
	      break;

	    if (! memrefs_conflict_p (xsize, i_x ? i_x : x,
				      ysize, i_y ? i_y : y, c))
	      return 0;
	  }
	break;

      default:
	break;
      }

  /* Treat an access through an AND (e.g. a subword access on an Alpha)
     as an access with indeterminate size.  Assume that references
     besides AND are aligned, so if the size of the other reference is
1821     at least as large as the alignment, assume no other overlap.  */
1822  if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT)
1823    {
1824      if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
1825	xsize = -1;
1826      return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
1827    }
1828  if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT)
1829    {
      /* ??? If we are indexing far enough into the array/structure, we
	 may yet be able to determine that we can not overlap.  But we
	 also need to know that we are far enough from the end not to
	 overlap a following reference, so we do nothing with that for
	 now.  */
      if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
	ysize = -1;
      return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
    }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
	{
	  c += (INTVAL (y) - INTVAL (x));
	  return (xsize <= 0 || ysize <= 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
	}

      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (y) == CONST)
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, canon_rtx (XEXP (y, 0)), c);
	  else
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, y, c);
	}
      if (GET_CODE (y) == CONST)
	return memrefs_conflict_p (xsize, x, ysize,
				   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
	return (xsize <= 0 || ysize <= 0
		|| (rtx_equal_for_memref_p (x, y)
		    && ((c >= 0 && xsize > c) || (c < 0 && ysize + c > 0))));

      return 1;
    }
  return 1;
}
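
/* For illustration only (not part of the original source): testing a
   4-byte reference at (reg:SI 30) against a 4-byte reference at
   (plus:SI (reg:SI 30) (const_int 8)), the PLUS case above recurses
   with C == 8; the bases then compare equal, and since C >= 0 but
   XSIZE == 4 is not greater than C, the accesses cannot overlap and 0
   is returned.  With (const_int 2) instead, XSIZE > C holds and a
   conflict is reported.  */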

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a non-AND varying address can never
   conflict with a non-MEM_IN_STRUCT reference at a fixed address.  We
   also must allow AND addresses, because they may generate accesses
   outside the object being referenced.  This is used to generate
   aligned addresses from unaligned addresses, for instance, the alpha
   storeqi_unaligned pattern.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (rtx mem, rtx x)
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}
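
/* For illustration only (not part of the original source): given

     volatile int a, b;
     int f (void) { return a + b; }

   both loads are volatile, so read_dependence reports a dependence and
   the two reads keep their order; a non-volatile read could still be
   interchanged with either of them.  */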

/* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
   MEM2 is a reference to a structure at a varying address, or returns
   MEM2 if vice versa.  Otherwise, returns NULL_RTX.  If a non-NULL
   value is returned MEM1 and MEM2 can never alias.  VARIES_P is used
   to decide whether or not an address may vary; it should return
   nonzero whenever variation is possible.
   MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2.  */

static rtx
fixed_scalar_and_varying_struct_p (rtx mem1, rtx mem2, rtx mem1_addr,
				   rtx mem2_addr,
				   int (*varies_p) (rtx, int))
{
  if (! flag_strict_aliasing)
    return NULL_RTX;

  if (MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
      && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
    /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
       varying address.  */
    return mem1;

  if (MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
      && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
    /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
       varying address.  */
    return mem2;

  return NULL_RTX;
}
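
/* For illustration only (not part of the original source; the type
   NODE below is hypothetical): under -fstrict-aliasing, given

     static int counter;
     void f (struct node *p) { p->val = counter; }

   the read of COUNTER is a scalar at a fixed address while P->VAL is
   a structure reference at a varying address, so this function would
   return the scalar MEM, asserting the two can never alias.  */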

/* Returns nonzero if something about the mode or address format of MEM
   indicates that it might well alias *anything*.  */

static int
aliases_everything_p (rtx mem)
{
  if (GET_CODE (XEXP (mem, 0)) == AND)
    /* If the address is an AND, it's very hard to know what it is
       actually pointing at.  */
    return 1;

  return 0;
}

/* Return true if we can determine that the fields referenced cannot
   overlap for any pair of objects.  */

static bool
nonoverlapping_component_refs_p (tree x, tree y)
{
  tree fieldx, fieldy, typex, typey, orig_y;

  do
    {
      /* The comparison has to be done at a common type, since we don't
	 know how the inheritance hierarchy works.  */
      orig_y = y;
      do
	{
	  fieldx = TREE_OPERAND (x, 1);
	  typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));

	  y = orig_y;
	  do
	    {
	      fieldy = TREE_OPERAND (y, 1);
	      typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));

	      if (typex == typey)
		goto found;

	      y = TREE_OPERAND (y, 0);
	    }
	  while (y && TREE_CODE (y) == COMPONENT_REF);

	  x = TREE_OPERAND (x, 0);
	}
      while (x && TREE_CODE (x) == COMPONENT_REF);
      /* Never found a common type.  */
      return false;

    found:
      /* If we're left with accessing different fields of a structure,
	 then no overlap.  */
      if (TREE_CODE (typex) == RECORD_TYPE
	  && fieldx != fieldy)
	return true;

      /* The comparison on the current field failed.  If we're accessing
	 a deeply nested structure, look at the next outer level.  */
      x = TREE_OPERAND (x, 0);
      y = TREE_OPERAND (y, 0);
    }
  while (x && y
	 && TREE_CODE (x) == COMPONENT_REF
	 && TREE_CODE (y) == COMPONENT_REF);

  return false;
}
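
/* For illustration only (not part of the original source): given

     struct pair { int first; int second; } *p, *q;

   the references p->first and q->second reach the common type PAIR
   with FIELD_DECLs FIRST != SECOND, so they cannot overlap even when
   p == q.  For p->first and q->first the fields compare equal, the
   walk steps out one level, runs out of COMPONENT_REFs, and the
   function conservatively returns false.  */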

/* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */

static tree
decl_for_component_ref (tree x)
{
  do
    {
      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);

  return x && DECL_P (x) ? x : NULL_TREE;
}

/* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
   offset of the field reference.  */

static rtx
adjust_offset_for_component_ref (tree x, rtx offset)
{
  HOST_WIDE_INT ioffset;

  if (! offset)
    return NULL_RTX;

  ioffset = INTVAL (offset);
  do
    {
      tree offset = component_ref_field_offset (x);
      tree field = TREE_OPERAND (x, 1);

      if (! host_integerp (offset, 1))
	return NULL_RTX;
      ioffset += (tree_low_cst (offset, 1)
		  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		     / BITS_PER_UNIT));

      x = TREE_OPERAND (x, 0);
    }
  while (x && TREE_CODE (x) == COMPONENT_REF);

  return GEN_INT (ioffset);
}
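
/* For illustration only (not part of the original source): given

     struct outer { int pad; struct inner { int x; int y; } in; } v;

   with 4-byte ints, adjusting an offset of 0 for the reference v.in.y
   accumulates 4 bytes (Y within INNER) plus 4 bytes (IN within OUTER)
   and returns GEN_INT (8), i.e. the position of the access relative
   to the start of V itself.  */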

/* Return nonzero if we can determine the exprs corresponding to memrefs
   X and Y and they do not overlap.  */

static int
nonoverlapping_memrefs_p (rtx x, rtx y)
{
  tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
  rtx rtlx, rtly;
  rtx basex, basey;
  rtx moffsetx, moffsety;
  HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;

  /* Unless both have exprs, we can't tell anything.  */
  if (exprx == 0 || expry == 0)
    return 0;

  /* If both are field references, we may be able to determine something.  */
  if (TREE_CODE (exprx) == COMPONENT_REF
      && TREE_CODE (expry) == COMPONENT_REF
      && nonoverlapping_component_refs_p (exprx, expry))
    return 1;

  /* If the field reference test failed, look at the DECLs involved.  */
  moffsetx = MEM_OFFSET (x);
  if (TREE_CODE (exprx) == COMPONENT_REF)
    {
      if (TREE_CODE (expry) == VAR_DECL
	  && POINTER_TYPE_P (TREE_TYPE (expry)))
	{
	  tree field = TREE_OPERAND (exprx, 1);
	  tree fieldcontext = DECL_FIELD_CONTEXT (field);
	  if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
							TREE_TYPE (field)))
	    return 1;
	}
      {
	tree t = decl_for_component_ref (exprx);
	if (! t)
	  return 0;
	moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
	exprx = t;
      }
    }
  else if (INDIRECT_REF_P (exprx))
    {
      exprx = TREE_OPERAND (exprx, 0);
      if (flag_argument_noalias < 2
	  || TREE_CODE (exprx) != PARM_DECL)
	return 0;
    }

  moffsety = MEM_OFFSET (y);
  if (TREE_CODE (expry) == COMPONENT_REF)
    {
      if (TREE_CODE (exprx) == VAR_DECL
	  && POINTER_TYPE_P (TREE_TYPE (exprx)))
	{
	  tree field = TREE_OPERAND (expry, 1);
	  tree fieldcontext = DECL_FIELD_CONTEXT (field);
	  if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
							TREE_TYPE (field)))
	    return 1;
	}
      {
	tree t = decl_for_component_ref (expry);
	if (! t)
	  return 0;
	moffsety = adjust_offset_for_component_ref (expry, moffsety);
	expry = t;
      }
    }
  else if (INDIRECT_REF_P (expry))
    {
      expry = TREE_OPERAND (expry, 0);
      if (flag_argument_noalias < 2
	  || TREE_CODE (expry) != PARM_DECL)
	return 0;
    }

  if (! DECL_P (exprx) || ! DECL_P (expry))
    return 0;

  rtlx = DECL_RTL (exprx);
  rtly = DECL_RTL (expry);

  /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
     can't overlap unless they are the same because we never reuse that part
     of the stack frame used for locals for spilled pseudos.  */
  if ((!MEM_P (rtlx) || !MEM_P (rtly))
      && ! rtx_equal_p (rtlx, rtly))
    return 1;

  /* Get the base and offsets of both decls.  If either is a register, we
     know both are and are the same, so use that as the base.  The only way
     we can avoid overlap is if we can deduce that they are nonoverlapping
     pieces of that decl, which is very rare.  */
  basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
  if (GET_CODE (basex) == PLUS && GET_CODE (XEXP (basex, 1)) == CONST_INT)
    offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);

  basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
  if (GET_CODE (basey) == PLUS && GET_CODE (XEXP (basey, 1)) == CONST_INT)
    offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);

  /* If the bases are different, we know they do not overlap if both
     are constants or if one is a constant and the other a pointer into the
     stack frame.  Otherwise a different base means we can't tell if they
     overlap or not.  */
  if (! rtx_equal_p (basex, basey))
    return ((CONSTANT_P (basex) && CONSTANT_P (basey))
	    || (CONSTANT_P (basex) && REG_P (basey)
		&& REGNO_PTR_FRAME_P (REGNO (basey)))
	    || (CONSTANT_P (basey) && REG_P (basex)
		&& REGNO_PTR_FRAME_P (REGNO (basex))));

  sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
	   : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
	   : -1);
  sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
	   : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly))
	   : -1);

  /* If we have an offset for either memref, it can update the values computed
     above.  */
  if (moffsetx)
    offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
  if (moffsety)
    offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);

  /* If a memref has both a size and an offset, we can use the smaller size.
     We can't do this if the offset isn't known because we must view this
     memref as being anywhere inside the DECL's MEM.  */
  if (MEM_SIZE (x) && moffsetx)
    sizex = INTVAL (MEM_SIZE (x));
  if (MEM_SIZE (y) && moffsety)
    sizey = INTVAL (MEM_SIZE (y));

  /* Put the values of the memref with the lower offset in X's values.  */
  if (offsetx > offsety)
    {
      tem = offsetx, offsetx = offsety, offsety = tem;
      tem = sizex, sizex = sizey, sizey = tem;
    }

  /* If we don't know the size of the lower-offset value, we can't tell
     if they conflict.  Otherwise, we do the test.  */
  return sizex >= 0 && offsety >= offsetx + sizex;
}
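
/* For illustration only (not part of the original source): two
   references into the same local array, say 4 bytes at offset 0 and
   4 bytes at offset 8, resolve to the same base; after the final swap
   puts the lower offset in X's values, OFFSETY (8) >= OFFSETX (0) +
   SIZEX (4) holds and nonzero is returned: the memrefs cannot
   overlap.  */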

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (rtx mem, enum machine_mode mem_mode, rtx x,
		 int (*varies) (rtx, int))
{
  rtx x_addr, mem_addr;
  rtx base;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find the read-only flag
     set on MEM, but stupid user tricks can produce such MEMs, so don't
     die.  */
  if (MEM_READONLY_P (x))
    return 0;

  if (nonoverlapping_memrefs_p (mem, x))
    return 0;

  if (mem_mode == VOIDmode)
    mem_mode = GET_MODE (mem);

  x_addr = get_addr (XEXP (x, 0));
  mem_addr = get_addr (XEXP (mem, 0));

  base = find_base_term (x_addr);
  if (base && (GET_CODE (base) == LABEL_REF
	       || (GET_CODE (base) == SYMBOL_REF
		   && CONSTANT_POOL_ADDRESS_P (base))))
    return 0;

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
			    SIZE_FOR_MODE (x), x_addr, 0))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_MODE, rather than GET_MODE (MEM).  */
  if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
    return 1;

  /* In true_dependence we also allow BLKmode to alias anything.  Why
     don't we do this in anti_dependence and output_dependence?  */
  if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
					      varies);
}
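
/* For illustration only (not part of the original source): a caller
   deciding whether a load X may be hoisted above an earlier store MEM
   would typically write

     if (true_dependence (mem, GET_MODE (mem), x, rtx_addr_varies_p))
       keep_load_below_store ();

   where rtx_addr_varies_p is the usual VARIES callback and
   keep_load_below_store is a stand-in for the caller's bookkeeping.  */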

/* Canonical true dependence: X is read after store in MEM takes place.
   Variant of true_dependence which assumes MEM has already been
   canonicalized (hence we no longer do that here).
   The mem_addr argument has been added, since true_dependence computed
   this value prior to canonicalizing.  */

int
canon_true_dependence (rtx mem, enum machine_mode mem_mode, rtx mem_addr,
		       rtx x, int (*varies) (rtx, int))
{
  rtx x_addr;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  /* Read-only memory is by definition never modified, and therefore can't
     conflict with anything.  We don't expect to find the read-only flag
     set on MEM, but stupid user tricks can produce such MEMs, so don't
     die.  */
  if (MEM_READONLY_P (x))
    return 0;

  if (nonoverlapping_memrefs_p (x, mem))
    return 0;

  x_addr = get_addr (XEXP (x, 0));

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
    return 0;

  x_addr = canon_rtx (x_addr);
  if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
			    SIZE_FOR_MODE (x), x_addr, 0))
    return 0;

  if (aliases_everything_p (x))
    return 1;

  /* We cannot use aliases_everything_p to test MEM, since we must look
     at MEM_MODE, rather than GET_MODE (MEM).  */
  if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
    return 1;

  /* In true_dependence we also allow BLKmode to alias anything.  Why
     don't we do this in anti_dependence and output_dependence?  */
  if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
    return 1;

  return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
					      varies);
}

/* Returns nonzero if a write to X might alias a previous read from
   (or, if WRITEP is nonzero, a write to) MEM.  */

static int
write_dependence_p (rtx mem, rtx x, int writep)
{
  rtx x_addr, mem_addr;
  rtx fixed_scalar;
  rtx base;

  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
    return 1;

  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
     This is used in epilogue deallocation functions.  */
  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
    return 1;
  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return 1;
  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
    return 1;

  if (DIFFERENT_ALIAS_SETS_P (x, mem))
    return 0;

  /* A read from read-only memory can't conflict with read-write memory.  */
  if (!writep && MEM_READONLY_P (mem))
    return 0;

  if (nonoverlapping_memrefs_p (x, mem))
    return 0;

  x_addr = get_addr (XEXP (x, 0));
  mem_addr = get_addr (XEXP (mem, 0));

  if (! writep)
    {
      base = find_base_term (mem_addr);
      if (base && (GET_CODE (base) == LABEL_REF
		   || (GET_CODE (base) == SYMBOL_REF
		       && CONSTANT_POOL_ADDRESS_P (base))))
	return 0;
    }

  if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
			  GET_MODE (mem)))
    return 0;

  x_addr = canon_rtx (x_addr);
  mem_addr = canon_rtx (mem_addr);

  if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
			   SIZE_FOR_MODE (x), x_addr, 0))
    return 0;

  fixed_scalar
    = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
					 rtx_addr_varies_p);

  return (!(fixed_scalar == mem && !aliases_everything_p (x))
	  && !(fixed_scalar == x && !aliases_everything_p (mem)));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (rtx mem, rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/0);
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (rtx mem, rtx x)
{
  return write_dependence_p (mem, x, /*writep=*/1);
}
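
/* For illustration only (not part of the original source): in

     tmp = a;      (1) read of A
     a = b;        (2) write of A
     a = c;        (3) write of A

   anti_dependence covers the (1)-(2) pair, a read followed by a write,
   while output_dependence covers the (2)-(3) pair, two writes to the
   same location; neither pair may be reordered.  */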

/* Initialize the static register base values once per compilation,
   recording which hard registers may hold an incoming pointer.  */

void
init_alias_once (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    /* Check whether this register can hold an incoming pointer
       argument.  FUNCTION_ARG_REGNO_P tests outgoing register
       numbers, so translate if necessary due to register windows.  */
    if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
	&& HARD_REGNO_MODE_OK (i, Pmode))
      static_reg_base_value[i]
	= gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));

  static_reg_base_value[STACK_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
  static_reg_base_value[ARG_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
  static_reg_base_value[FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
  static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
    = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
#endif
}

/* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
   to be a memory reference).  */
static bool memory_modified;
static void
memory_modified_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
  if (MEM_P (x))
    {
      if (anti_dependence (x, (rtx)data) || output_dependence (x, (rtx)data))
	memory_modified = true;
    }
}

/* Return true when INSN possibly modifies the memory contents of MEM
   (i.e. when some store in INSN may write into the memory MEM
   addresses).  */
bool
memory_modified_in_insn_p (rtx mem, rtx insn)
{
  if (!INSN_P (insn))
    return false;
  memory_modified = false;
  note_stores (PATTERN (insn), memory_modified_1, mem);
  return memory_modified;
}
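
/* For illustration only (not part of the original source): a caller
   verifying that a value loaded from MEM at FROM is still valid at TO
   might write

     for (insn = from; insn != to; insn = NEXT_INSN (insn))
       if (memory_modified_in_insn_p (mem, insn))
	 return false;

   rejecting the range as soon as any insn may store into MEM.  */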

/* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
   array.  */

void
init_alias_analysis (void)
{
  unsigned int maxreg = max_reg_num ();
  int changed, pass;
  int i;
  unsigned int ui;
  rtx insn;

  timevar_push (TV_ALIAS_ANALYSIS);

  reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
  reg_known_value = ggc_calloc (reg_known_value_size, sizeof (rtx));
  reg_known_equiv_p = xcalloc (reg_known_value_size, sizeof (bool));

  /* Overallocate reg_base_value to allow some growth during loop
     optimization.  Loop unrolling can create a large number of
     registers.  */
  if (old_reg_base_value)
    {
      reg_base_value = old_reg_base_value;
      /* If the varray gets large, zeroing costs may become significant.  */
      if (VARRAY_SIZE (reg_base_value) > 256
	  && VARRAY_SIZE (reg_base_value) > 4 * maxreg)
	VARRAY_GROW (reg_base_value, maxreg);
      VARRAY_CLEAR (reg_base_value);
      if (VARRAY_SIZE (reg_base_value) < maxreg)
	VARRAY_GROW (reg_base_value, maxreg);
    }
  else
    {
      VARRAY_RTX_INIT (reg_base_value, maxreg, "reg_base_value");
    }

  new_reg_base_value = xmalloc (maxreg * sizeof (rtx));
  reg_seen = xmalloc (maxreg);

  /* The basic idea is that each pass through this loop will use the
     "constant" information from the previous pass to propagate alias
     information through another level of assignments.

     This could get expensive if the assignment chains are long.  Maybe
     we should throttle the number of iterations, possibly based on
     the optimization level or flag_expensive_optimizations.

     We could propagate more information in the first pass by making use
     of REG_N_SETS to determine immediately that the alias information
     for a pseudo is "constant".

     A program with an uninitialized variable can cause an infinite loop
     here.  Instead of doing a full dataflow analysis to detect such problems
     we just cap the number of iterations for the loop.

     The state of the arrays for the set chain in question does not matter
     since the program has undefined behavior.  */

  pass = 0;
  do
    {
      /* Assume nothing will change this iteration of the loop.  */
      changed = 0;

      /* We want to assign the same IDs each iteration of this loop, so
	 start counting from zero each iteration of the loop.  */
      unique_id = 0;

      /* We're at the start of the function each iteration through the
	 loop, so we're copying arguments.  */
      copying_arguments = true;

      /* Wipe the potential alias information clean for this pass.  */
      memset (new_reg_base_value, 0, maxreg * sizeof (rtx));

      /* Wipe the reg_seen array clean.  */
      memset (reg_seen, 0, maxreg);

      /* Mark all hard registers which may contain an address.
	 The stack, frame and argument pointers may contain an address.
	 An argument register which can hold a Pmode value may contain
	 an address even if it is not in BASE_REGS.

	 The address expression is VOIDmode for an argument and
	 Pmode for other registers.  */

      memcpy (new_reg_base_value, static_reg_base_value,
	      FIRST_PSEUDO_REGISTER * sizeof (rtx));

      /* Walk the insns adding values to the new_reg_base_value array.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn))
	    {
	      rtx note, set;

#if defined (HAVE_prologue) || defined (HAVE_epilogue)
	      /* The prologue/epilogue insns are not threaded onto the
		 insn chain until after reload has completed.  Thus,
		 there is no sense wasting time checking if INSN is in
		 the prologue/epilogue until after reload has completed.  */
	      if (reload_completed
		  && prologue_epilogue_contains (insn))
		continue;
#endif

	      /* If this insn has a noalias note, process it.  Otherwise,
		 scan for sets.  A simple set will have no side effects
		 which could change the base value of any other register.  */

	      if (GET_CODE (PATTERN (insn)) == SET
		  && REG_NOTES (insn) != 0
		  && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
		record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
	      else
		note_stores (PATTERN (insn), record_set, NULL);

	      set = single_set (insn);

	      if (set != 0
		  && REG_P (SET_DEST (set))
		  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
		{
		  unsigned int regno = REGNO (SET_DEST (set));
		  rtx src = SET_SRC (set);
		  rtx t;

		  if (REG_NOTES (insn) != 0
		      && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
			   && REG_N_SETS (regno) == 1)
			  || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
		      && GET_CODE (XEXP (note, 0)) != EXPR_LIST
		      && ! rtx_varies_p (XEXP (note, 0), 1)
		      && ! reg_overlap_mentioned_p (SET_DEST (set),
						    XEXP (note, 0)))
		    {
		      set_reg_known_value (regno, XEXP (note, 0));
		      set_reg_known_equiv_p (regno,
					     REG_NOTE_KIND (note) == REG_EQUIV);
		    }
		  else if (REG_N_SETS (regno) == 1
			   && GET_CODE (src) == PLUS
			   && REG_P (XEXP (src, 0))
			   && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
			   && GET_CODE (XEXP (src, 1)) == CONST_INT)
		    {
		      t = plus_constant (t, INTVAL (XEXP (src, 1)));
		      set_reg_known_value (regno, t);
		      set_reg_known_equiv_p (regno, 0);
		    }
		  else if (REG_N_SETS (regno) == 1
			   && ! rtx_varies_p (src, 1))
		    {
		      set_reg_known_value (regno, src);
		      set_reg_known_equiv_p (regno, 0);
		    }
		}
	    }
	  else if (NOTE_P (insn)
		   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
	    copying_arguments = false;
	}

      /* Now propagate values from new_reg_base_value to reg_base_value.  */
      gcc_assert (maxreg == (unsigned int) max_reg_num ());

      for (ui = 0; ui < maxreg; ui++)
	{
	  if (new_reg_base_value[ui]
	      && new_reg_base_value[ui] != VARRAY_RTX (reg_base_value, ui)
	      && ! rtx_equal_p (new_reg_base_value[ui],
				VARRAY_RTX (reg_base_value, ui)))
	    {
	      VARRAY_RTX (reg_base_value, ui) = new_reg_base_value[ui];
	      changed = 1;
	    }
	}
    }
  while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);

  /* Fill in the remaining entries.  */
  for (i = 0; i < (int) reg_known_value_size; i++)
    if (reg_known_value[i] == 0)
      reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];

  /* Simplify the reg_base_value array so that no register refers to
     another register, except to special registers indirectly through
     ADDRESS expressions.

     In theory this loop can take as long as O(registers^2), but unless
     there are very long dependency chains it will run in close to linear
     time.

     This loop may not be needed any longer now that the main loop does
     a better job at propagating alias information.  */
  pass = 0;
  do
    {
      changed = 0;
      pass++;
      for (ui = 0; ui < maxreg; ui++)
	{
	  rtx base = VARRAY_RTX (reg_base_value, ui);
	  if (base && REG_P (base))
	    {
	      unsigned int base_regno = REGNO (base);
	      if (base_regno == ui)		/* register set from itself */
		VARRAY_RTX (reg_base_value, ui) = 0;
	      else
		VARRAY_RTX (reg_base_value, ui)
		  = VARRAY_RTX (reg_base_value, base_regno);
	      changed = 1;
	    }
	}
    }
  while (changed && pass < MAX_ALIAS_LOOP_PASSES);

  /* Clean up.  */
  free (new_reg_base_value);
  new_reg_base_value = 0;
  free (reg_seen);
  reg_seen = 0;
  timevar_pop (TV_ALIAS_ANALYSIS);
}
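
/* For illustration only (not part of the original source): given the
   insn stream

     (set (reg 100) (symbol_ref "buf"))
     (set (reg 101) (plus (reg 100) (const_int 16)))

   where each pseudo is set exactly once, the walk above records
   reg_known_value[100] = (symbol_ref "buf"), and the PLUS case then
   uses plus_constant to record reg_known_value[101] as
   (const (plus (symbol_ref "buf") (const_int 16))), letting later
   address comparisons see through both registers.  */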

/* Tear down the aliasing machinery, releasing per-function tables and
   saving reg_base_value for reuse by the next function.  */

void
end_alias_analysis (void)
{
  old_reg_base_value = reg_base_value;
  ggc_free (reg_known_value);
  reg_known_value = 0;
  reg_known_value_size = 0;
  free (reg_known_equiv_p);
  reg_known_equiv_p = 0;
  if (alias_invariant)
    {
      ggc_free (alias_invariant);
      alias_invariant = 0;
      alias_invariant_size = 0;
    }
}

/* Do control and data flow analysis; write some of the results to the
   dump file.  */
static void
rest_of_handle_cfg (void)
{
  if (dump_file)
    dump_flow_info (dump_file);
  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE
		 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
}

struct tree_opt_pass pass_cfg =
{
  "cfg",				/* name */
  NULL,					/* gate */
  rest_of_handle_cfg,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_FLOW,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  'f'					/* letter */
};

#include "gt-alias.h"