/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
   allocated in Pmode, not ptr_mode.  Set only by internal_reference_types,
   which is called only by a front end.  */
static int reference_types_internal = 0;

static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) tree pending_sizes;

/* Show that REFERENCE_TYPES are internal and should be Pmode.  Called only
   by a front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a list of all the objects put on the pending sizes list.  */

tree
get_pending_sizes (void)
{
  tree chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
}

/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (tree chain)
{
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  Likewise for self-referential sizes and
     constant sizes.  */
  if (TREE_CONSTANT (size)
      || lang_hooks.decls.global_bindings_p () < 0
      || CONTAINS_PLACEHOLDER_P (size))
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->x_dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
	error ("type size can%'t be explicitly evaluated");
      else
	error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
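
/* Example (illustrative only): for a C99 variable-length array such as

     void f (int n) { char buf[n * 2]; }

   the size expression `n * 2' is not constant, so variable_size wraps
   it in a SAVE_EXPR.  That guarantees the size is computed exactly
   once even though it is referenced by both TYPE_SIZE and
   TYPE_SIZE_UNIT of the array type.  */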

#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class CLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class class, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
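
/* Illustrative example (assuming a target where QImode, HImode, SImode
   and DImode are 8, 16, 32 and 64 bits wide, as on most ports):

     mode_for_size (32, MODE_INT, 0)  => SImode
     mode_for_size (24, MODE_INT, 0)  => BLKmode  (no 24-bit integer mode)

   The search starts at the narrowest mode in the class, so the first
   exact match wins.  */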

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
  if (TREE_CODE (size) != INTEGER_CST
      || TREE_OVERFLOW (size)
      /* What we really want to say here is that the size can fit in a
	 host integer, but we know there's no way we'd find a mode for
	 this many bits, so there's no point in doing the precise test.  */
      || compare_tree_int (size, 1000) > 0)
    return BLKmode;
  else
    return mode_for_size (tree_low_cst (size, 1), class, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class class)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
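
/* Unlike mode_for_size, this rounds up: under the same illustrative
   assumptions as above, smallest_mode_for_size (24, MODE_INT) yields
   SImode, the narrowest integer mode with at least 24 value bits.  */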

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
	break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
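
/* For instance, int_mode_for_mode (SFmode) usually yields SImode: both
   are 32 bits wide on typical targets, which lets callers shuffle the
   bits of a float around without interpreting them as a number.  */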

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}
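
/* mode_base_align is measured in units, so on a typical 32-bit port
   where mode_base_align[SImode] is 4 this returns 32 bits for SImode,
   clamped between 1 and BIGGEST_ALIGNMENT.  */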

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
					    bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
#ifdef PCC_BITFIELD_TYPE_MATTERS
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
#endif
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode
	     and occupying a complete byte or bytes on proper boundary.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
	    {
	      enum machine_mode xmode
		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);

	      if (xmode != BLKmode
		  && (known_align == 0
		      || known_align >= GET_MODE_ALIGNMENT (xmode)))
		{
		  DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
					   DECL_ALIGN (decl));
		  DECL_MODE (decl) = xmode;
		  DECL_BIT_FIELD (decl) = 0;
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is of variable size, we can't misalign it since we
	 have no way to make a temporary to align the result.  But this
	 isn't an issue if the decl is not addressable.  Likewise if it
	 is of unknown size.

	 Note that do_type_align may set DECL_USER_ALIGN, so we need to
	 check old_user_align instead.  */
      if (DECL_PACKED (decl)
	  && !old_user_align
	  && !zero_bitfield
	  && (DECL_NONADDRESSABLE_P (decl)
	      || DECL_SIZE_UNIT (decl) == 0
	      || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! DECL_USER_ALIGN (decl) && (! DECL_PACKED (decl) || zero_bitfield))
	{
	  /* Some targets (e.g. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  DECL_ALIGN (decl)
	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
	}

      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
	  && compare_tree_int (size, larger_than_size) > 0)
	{
	  int size_as_int = TREE_INT_CST_LOW (size);

	  if (compare_tree_int (size, size_as_int) == 0)
	    warning (0, "size of %q+D is %d bytes", decl, size_as_int);
	  else
	    warning (0, "size of %q+D is larger than %wd bytes",
		     decl, larger_than_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
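
/* Illustrative example: given

     struct s { int a : 16; };

   on a typical 32-bit target the 16-bit field exactly fits HImode, so
   when KNOWN_ALIGN permits, layout_decl gives `a' that mode and clears
   DECL_BIT_FIELD; the field is then handled as an ordinary HImode
   field instead of going through the bit-field extract/insert paths.  */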

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Hook for a front-end function that can modify the record layout as needed
   immediately before it is finalized.  */

static void (*lang_adjust_rli) (record_layout_info) = 0;

void
set_lang_adjust_rli (void (*f) (record_layout_info))
{
  lang_adjust_rli = f;
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    rli->record_align = MAX (rli->record_align, (unsigned) STRUCTURE_SIZE_BOUNDARY);
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
		     size_binop (MULT_EXPR,
				 fold_convert (bitsizetype, offset),
				 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
		     fold_convert (sizetype,
				   size_binop (TRUNC_DIV_EXPR, bitpos,
					       bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
	      tree pos)
{
  *poffset = size_binop (MULT_EXPR,
			 fold_convert (sizetype,
				       size_binop (FLOOR_DIV_EXPR, pos,
						   bitsize_int (off_align))),
			 size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
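
/* Worked example (illustrative): with BITS_PER_UNIT == 8 and
   OFF_ALIGN == 32, a bit position POS of 70 splits as

     offset = (70 / 32) * (32 / 8) = 2 * 4 = 8 bytes
     bitpos = 70 % 32              = 6 bits

   and bit_from_pos (8, 6) recovers 8 * 8 + 6 = 70.  */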

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
				      bitsize_int (off_align));

      *poffset
	= size_binop (PLUS_EXPR, *poffset,
		      size_binop (MULT_EXPR,
				  fold_convert (sizetype, extra_aligns),
				  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
	= size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
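
/* E.g. (illustrative), normalizing offset = 0, bitpos = 70 with
   OFF_ALIGN == 32 moves two 4-byte units into the byte offset, giving
   offset = 8, bitpos = 6 -- the same split pos_from_bit computes.  */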

/* Print debugging information about the layout information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);
  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.)  */
      if (! integer_zerop (DECL_SIZE (field))
	  ? ! DECL_PACKED (field)
	  : (rli->prev_field
	     && DECL_BIT_FIELD_TYPE (rli->prev_field)
	     && ! integer_zerop (DECL_SIZE (rli->prev_field))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  type_align = MAX (type_align, desired_align);
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  /* If we start a new run, make sure we start it properly aligned.  */
	  if ((!rli->prev_field
	       || integer_zerop (DECL_SIZE (field))
	       || integer_zerop (DECL_SIZE (rli->prev_field))
	       || !host_integerp (DECL_SIZE (rli->prev_field), 0)
	       || !host_integerp (TYPE_SIZE (type), 0)
	       || !simple_cst_equal (TYPE_SIZE (type),
				     TYPE_SIZE (TREE_TYPE (rli->prev_field)))
	       || (rli->remaining_in_alignment
		   < tree_low_cst (DECL_SIZE (field), 0)))
	      && desired_align < type_align)
	    desired_align = type_align;
	}
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

	  /* Targets might choose to handle unnamed and hence possibly
	     zero-width bitfields.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype,
			       DECL_QUALIFIER (field),
			       DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
	  > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
	     / align));
}
#endif
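
/* Worked example (illustrative): placing an `int : 24' bit-field at
   byte offset 3 on a 32-bit target gives offset = 24 % 32 = 24 bits,
   so (24 + 24 + 32 - 1) / 32 = 2 alignment units, while the 32-bit
   type itself spans only 32 / 32 = 1; the field would straddle an
   alignment boundary, and the caller advances to the next one.  */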

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      rli->pending_statics = tree_cons (NULL_TREE, field,
					rli->pending_statics);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
		   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
		   * (tree_low_cst (rli->offset, 1)
		      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      else
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && ! DECL_PACKED (field)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if it's long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields are
	packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t)
      && ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
	  || (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
    {
      /* At this point, either the prior or current are bitfields,
	 (possibly both), and we're dealing with MS packing.  */
      tree prev_saved = rli->prev_field;

      /* Is the prior field a bitfield?  If so, handle "runs" of same
	 type size fields.  */
      if (rli->prev_field /* necessarily a bitfield if it exists.  */)
	{
	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run".  (Note: it's nonzero declared
	     size, but equal type sizes!)  (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.)  */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && host_integerp (DECL_SIZE (rli->prev_field), 0)
	      && host_integerp (TYPE_SIZE (type), 0)
	      && simple_cst_equal (TYPE_SIZE (type),
				   TYPE_SIZE (TREE_TYPE (rli->prev_field))))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!)  */
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);

	      if (rli->remaining_in_alignment < bitsize)
		{
		  /* If PREV_FIELD is packed, and we haven't lumped
		     non-packed bitfields with it, treat this as if PREV_FIELD
		     was not a bitfield.  This avoids anomalies where a packed
		     bitfield with long long base type can take up more
		     space than a same-size bitfield with base type short.  */
		  if (rli->prev_packed)
		    rli->prev_field = prev_saved = NULL;
		  else
		    {
		      /* Out of bits; bump up to next 'word'.  */
		      rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
		      rli->bitpos
			= size_binop (PLUS_EXPR, TYPE_SIZE (type),
				      DECL_FIELD_BIT_OFFSET (rli->prev_field));
		      rli->prev_field = field;
		      rli->remaining_in_alignment
			= tree_low_cst (TYPE_SIZE (type), 0) - bitsize;
		    }
		}
	      else
		rli->remaining_in_alignment -= bitsize;
	    }
	  else if (rli->prev_packed)
	    rli->prev_field = prev_saved = NULL;
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));

		  /* If the desired alignment is greater or equal to TYPE_SIZE,
		     we have already adjusted rli->bitpos / rli->offset above.  */
		  if ((unsigned HOST_WIDE_INT) tree_low_cst (type_size, 0)
		      > desired_align)
		    rli->bitpos
		      = size_binop (PLUS_EXPR, type_size,
				    DECL_FIELD_BIT_OFFSET (rli->prev_field));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  rli->prev_packed = 0;
	  normalize_rli (rli);
	}

      /* If we're starting a new run of same size type bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield and the type sizes differ, or if there wasn't
	 a prior bitfield and the size of the current field is nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type),
				   TYPE_SIZE (TREE_TYPE (prev_saved)))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
	      && host_integerp (DECL_SIZE (field), 0))
	    rli->remaining_in_alignment
	      = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
		- tree_low_cst (DECL_SIZE (field), 0);

	  /* Now align (conventionally) for the new type.  */
	  if (!DECL_PACKED (field))
	    type_align = MAX (TYPE_ALIGN (type), type_align);

	  if (prev_saved
	      && DECL_BIT_FIELD_TYPE (prev_saved)
	      /* If the previous bit-field is zero-sized, we've already
		 accounted for its alignment needs (or ignored it, if
		 appropriate) while placing it.  */
	      && ! integer_zerop (DECL_SIZE (prev_saved)))
	    type_align = MAX (type_align,
			      TYPE_ALIGN (TREE_TYPE (prev_saved)));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
		    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
		       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (DECL_BIT_FIELD_TYPE (field))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      unsigned int mfa = maximum_field_alignment;

      if (integer_zerop (DECL_SIZE (field)))
	mfa = initial_max_fld_align * BITS_PER_UNIT;

      /* Only the MS bitfields use this.  We used to also put any kind of
	 packed bit fields into prev_field, but that makes no sense, because
	 an 8 bit packed bit field shouldn't impose more restriction on
	 following fields than a char field, and the alignment requirements
	 are also not fulfilled.
	 There is no sane value to set rli->remaining_in_alignment to when
	 a packed bitfield in prev_field is unaligned.  */
      if (mfa != 0)
	type_align = MIN (type_align, mfa);
      gcc_assert (rli->prev_field
		  || actual_align >= type_align || DECL_PACKED (field)
		  || integer_zerop (DECL_SIZE (field))
		  || !targetm.ms_bitfield_layout_p (rli->t));
      if (rli->prev_field == NULL && actual_align >= type_align
	  && !integer_zerop (DECL_SIZE (field)))
	{
	  rli->prev_field = field;
	  /* rli->remaining_in_alignment has not been set if the bitfield
	     has size zero, or if it is a packed bitfield.  */
	  rli->remaining_in_alignment
	    = (tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
	       - tree_low_cst (DECL_SIZE (field), 0));
	  rli->prev_packed = DECL_PACKED (field);
	}
      else if (rli->prev_field && DECL_PACKED (field))
	{
	  HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);

	  if (rli->remaining_in_alignment < bitsize)
	    rli->prev_field = NULL;
	  else
	    rli->remaining_in_alignment -= bitsize;
	}
    }

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST
	   || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field)))
    {
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
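
/* Illustrative walk-through: for

     struct s { char c; int i; };

   on a typical 32-bit target, `c' lands at bit position 0; when `i'
   arrives, known_align is 8 bits but desired_align is 32, so the bit
   position is rounded up to 32 (with a -Wpadded warning if enabled)
   and `i' ends up at byte offset 4.  */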

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					  rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  TYPE_PACKED (rli->t) = 0;

	  if (TYPE_NAME (rli->t))
	    {
	      const char *name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
	      else
		name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qs", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qs", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}
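
/* For example (illustrative): struct { int i; char c; } has an
   unpadded size of 40 bits but 32-bit alignment on a typical 32-bit
   target, so TYPE_SIZE is rounded up to 64 bits and -Wpadded reports
   the three bytes of tail padding.  */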

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  TYPE_MODE (type) = BLKmode;

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
	  || ! host_integerp (bit_position (field), 1)
	  || DECL_SIZE (field) == 0
	  || ! host_integerp (DECL_SIZE (field), 1))
	return;

      /* If this field is the whole struct, remember its mode so
	 that, say, we can put a double in a class into a DF
	 register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
	mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
	 to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
	return;
#endif /* MEMBER_TYPE_FORCES_BLK  */
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    TYPE_MODE (type) = mode;
  else
    TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
	 don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      TYPE_MODE (type) = BLKmode;
    }
}
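
/* Illustrative examples: struct { double d; } normally gets DFmode,
   since its only field spans the whole struct, while
   struct { int a; int b; } gets a 64-bit integer mode where one
   exists and BLKmode otherwise.  */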

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
	  || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
	      && TREE_CODE (type) != QUAL_UNION_TYPE
	      && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
	 alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
	{
	  TYPE_ALIGN (type) = mode_align;
	  TYPE_USER_ALIGN (type) = 0;
	}
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
				  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
					TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
	   variant != 0;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  TYPE_SIZE (variant) = size;
	  TYPE_SIZE_UNIT (variant) = size_unit;
	  TYPE_ALIGN (variant) = align;
	  TYPE_USER_ALIGN (variant) = user_align;
	  TYPE_MODE (variant) = mode;
	}
    }
}

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than true
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (rli->pending_statics)
    {
      layout_decl (TREE_VALUE (rli->pending_statics), 0);
      rli->pending_statics = TREE_CHAIN (rli->pending_statics);
    }

  /* Clean up.  */
  if (free_p)
    free (rli);
}
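
/* For illustration, the expected call sequence is the one layout_type
   uses below for records and unions:

     record_layout_info rli = start_record_layout (type);
     for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
       place_field (rli, field);
     finish_record_layout (rli, 1);

   Passing 0 for FREE_P keeps RLI alive after the call; as noted above,
   that is only meant for the G++ 3.2 ABI.  */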

/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
		       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = TREE_CHAIN (fields);
      TREE_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
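
/* For illustration only, a hypothetical two-field builtin record (the
   field and type names here are invented for the example):

     tree t = make_node (RECORD_TYPE);
     tree f_len = build_decl (FIELD_DECL, get_identifier ("len"),
			      sizetype);
     tree f_ptr = build_decl (FIELD_DECL, get_identifier ("ptr"),
			      ptr_type_node);
     TREE_CHAIN (f_len) = f_ptr;
     finish_builtin_struct (t, "__example_rec", f_len, NULL_TREE);

   Because FIELDS is chained in reverse, the laid-out record declares
   "ptr" first and "len" second.  */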

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
	 of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
	TYPE_PRECISION (type) = 1; /* Default to a precision of one bit.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case CHAR_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
	  && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
	TYPE_UNSIGNED (type) = 1;

      TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
						 MODE_INT);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
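      /* For example, an INTEGER_TYPE with TYPE_PRECISION 7 gets QImode
	 on a target whose QImode is 8 bits wide, so TYPE_SIZE becomes 8
	 and TYPE_SIZE_UNIT 1, even though only 7 bits are meaningful.  */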
      break;

    case REAL_TYPE:
      TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      TYPE_MODE (type)
	= mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
			 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
			  ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
			 0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
	int nunits = TYPE_VECTOR_SUBPARTS (type);
	tree nunits_tree = build_int_cst (NULL_TREE, nunits);
	tree innertype = TREE_TYPE (type);

	gcc_assert (!(nunits & (nunits - 1)));

	/* Find an appropriate mode for the vector type.  */
	if (TYPE_MODE (type) == VOIDmode)
	  {
	    enum machine_mode innermode = TYPE_MODE (innertype);
	    enum machine_mode mode;

	    /* First, look for a supported vector type.  */
	    if (GET_MODE_CLASS (innermode) == MODE_FLOAT)
	      mode = MIN_MODE_VECTOR_FLOAT;
	    else
	      mode = MIN_MODE_VECTOR_INT;

	    for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
	      if (GET_MODE_NUNITS (mode) == nunits
		  && GET_MODE_INNER (mode) == innermode
		  && targetm.vector_mode_supported_p (mode))
		break;

	    /* For integers, try mapping it to a same-sized scalar mode.  */
	    if (mode == VOIDmode
		&& GET_MODE_CLASS (innermode) == MODE_INT)
	      mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
				    MODE_INT, 0);

	    if (mode == VOIDmode || !have_regs_of_mode[mode])
	      TYPE_MODE (type) = BLKmode;
	    else
	      TYPE_MODE (type) = mode;
	  }

	TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
	TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
						 TYPE_SIZE_UNIT (innertype),
						 nunits_tree, 0);
	TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
					    nunits_tree, 0);

	/* Always naturally align vectors.  This prevents ABI changes
	   depending on whether or not native vector modes are supported.  */
	TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
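	/* E.g. a 4-element vector of 32-bit floats has TYPE_SIZE 128 and
	   is therefore given 128-bit alignment here, whether or not the
	   target provides a native V4SF vector mode.  */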
	break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      TYPE_MODE (type) = VOIDmode;
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
	 but ptrdiff_t must be integral.  */
      TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
	 make it consistent with that.  */
      TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
	enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
				   && reference_types_internal)
				  ? Pmode : TYPE_MODE (type));

	int nbits = GET_MODE_BITSIZE (mode);

	TYPE_SIZE (type) = bitsize_int (nbits);
	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
	TYPE_UNSIGNED (type) = 1;
	TYPE_PRECISION (type) = nbits;
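	/* E.g. if the chosen mode is 32 bits wide, the pointer type gets
	   TYPE_SIZE 32, TYPE_SIZE_UNIT 4 (with 8-bit units), and is an
	   unsigned type of precision 32.  */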
      }
      break;

    case ARRAY_TYPE:
      {
	tree index = TYPE_DOMAIN (type);
	tree element = TREE_TYPE (type);

	build_pointer_type (element);

	/* We need to know both bounds in order to compute the size.  */
	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
	    && TYPE_SIZE (element))
	  {
	    tree ub = TYPE_MAX_VALUE (index);
	    tree lb = TYPE_MIN_VALUE (index);
	    tree length;
	    tree element_size;

	    /* The initial subtraction should happen in the original type so
	       that (possible) negative values are handled appropriately.  */
	    length = size_binop (PLUS_EXPR, size_one_node,
				 fold_convert (sizetype,
					       fold_build2 (MINUS_EXPR,
							    TREE_TYPE (lb),
							    ub, lb)));
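	    /* Worked example: a domain of [0, 4] yields length
	       (4 - 0) + 1 == 5.  Folding the MINUS_EXPR in TREE_TYPE (lb)
	       first means a negative lower bound such as -1 is subtracted
	       as a signed value before the widening to sizetype.  */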

	    /* Special handling for arrays of bits (for Chill).  */
	    element_size = TYPE_SIZE (element);
	    if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
		&& (integer_zerop (TYPE_MAX_VALUE (element))
		    || integer_onep (TYPE_MAX_VALUE (element)))
		&& host_integerp (TYPE_MIN_VALUE (element), 1))
	      {
		HOST_WIDE_INT maxvalue
		  = tree_low_cst (TYPE_MAX_VALUE (element), 1);
		HOST_WIDE_INT minvalue
		  = tree_low_cst (TYPE_MIN_VALUE (element), 1);

		if (maxvalue - minvalue == 1
		    && (maxvalue == 1 || maxvalue == 0))
		  element_size = integer_one_node;
	      }

	    /* If neither bound is a constant and sizetype is signed, make
	       sure the size is never negative.  We should really do this
	       if *either* bound is non-constant, but this is the best
	       compromise between C and Ada.  */
	    if (!TYPE_UNSIGNED (sizetype)
		&& TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
		&& TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
	      length = size_binop (MAX_EXPR, length, size_zero_node);

	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
					   fold_convert (bitsizetype,
							 length));

	    /* If we know the size of the element, calculate the total
	       size directly, rather than do some division thing below.
	       This optimization helps Fortran assumed-size arrays
	       (where the size of the array is determined at runtime)
	       substantially.
	       Note that we can't do this in the case where the size of
	       the elements is one bit since TYPE_SIZE_UNIT cannot be
	       set correctly in that case.  */
	    if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
	      TYPE_SIZE_UNIT (type)
		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
	  }

	/* Now round the alignment and size,
	   using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
	TYPE_ALIGN (type)
	  = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
	TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
	TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
	TYPE_MODE (type) = BLKmode;
	if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
	    && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
	    /* BLKmode elements force BLKmode aggregate;
	       else extract/store fields may lose.  */
	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
	  {
	    /* One-element arrays get the component type's mode.  */
	    if (simple_cst_equal (TYPE_SIZE (type),
				  TYPE_SIZE (TREE_TYPE (type))))
	      TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
	    else
	      TYPE_MODE (type)
		= mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

	    if (TYPE_MODE (type) != BLKmode
		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
	      {
		TYPE_NO_FORCE_BLK (type) = 1;
		TYPE_MODE (type) = BLKmode;
	      }
	  }
	/* When the element size is constant, check that it is at least as
	   large as the element alignment.  */
	if (TYPE_SIZE_UNIT (element)
	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
	       TYPE_ALIGN_UNIT.  */
	    && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element))
	    && !integer_zerop (TYPE_SIZE_UNIT (element))
	    && compare_tree_int (TYPE_SIZE_UNIT (element),
				 TYPE_ALIGN_UNIT (element)) < 0)
	  error ("alignment of array elements is greater than element size");
	break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	record_layout_info rli;

	/* Initialize the layout information.  */
	rli = start_record_layout (type);

	/* If this is a QUAL_UNION_TYPE, we want to process the fields
	   in the reverse order in building the COND_EXPR that denotes
	   its size.  We reverse them again later.  */
	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	/* Place all the fields.  */
	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  place_field (rli, field);

	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	if (lang_adjust_rli)
	  (*lang_adjust_rli) (rli);

	/* Finish laying out the record.  */
	finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* If an alias set has been set for this aggregate when it was incomplete,
     force it into alias set 0.
     This is too conservative, but we cannot call record_component_aliases
     here because some frontends still change the aggregates after
     layout_type.  */
  if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
    TYPE_ALIAS_SET (type) = 0;
}

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}

/* Initialize sizetype and bitsizetype to a reasonable and temporary
   value to enable integer types to be created.  */

void
initialize_sizetypes (bool signed_p)
{
  tree t = make_node (INTEGER_TYPE);

  TYPE_MODE (t) = SImode;
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_USER_ALIGN (t) = 0;
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = !signed_p;
  TYPE_SIZE (t) = build_int_cst (t, GET_MODE_BITSIZE (SImode));
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
  TYPE_MIN_VALUE (t) = build_int_cst (t, 0);

  /* 1000 avoids problems with possible overflow and is certainly
     larger than any size value we'd want to be storing.  */
  TYPE_MAX_VALUE (t) = build_int_cst (t, 1000);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}

/* Make sizetype a version of TYPE, and initialize *sizetype
   accordingly.  We do this by overwriting the stub sizetype and
   bitsizetype nodes created by initialize_sizetypes.  This makes sure
   that (a) anything stubby about them no longer exists and (b) any
   INTEGER_CSTs created with such a type remain valid.  */

void
set_sizetype (tree type)
{
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
     precision.  */
  int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
		       2 * HOST_BITS_PER_WIDE_INT);
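  /* For instance, with a 32-bit sizetype, BITS_PER_UNIT_LOG == 3 and a
     64-bit HOST_WIDE_INT, this selects MIN (32 + 3 + 1, 128) == 36 bits
     of precision for bitsizetype.  */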
  tree t;

  gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));

  t = build_distinct_type_copy (type);
  /* We do want to use sizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We do want to use bitsizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;

  if (TYPE_UNSIGNED (type))
    {
      fixup_unsigned_type (bitsizetype);
      ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
      TYPE_IS_SIZETYPE (ssizetype) = 1;
      sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
      TYPE_IS_SIZETYPE (sbitsizetype) = 1;
    }
  else
    {
      fixup_signed_type (bitsizetype);
      ssizetype = sizetype;
      sbitsizetype = bitsizetype;
    }
}

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE,
   BOOLEAN_TYPE, or CHAR_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
					  int precision,
					  bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
	= build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
			      ? -1
			      : ((HOST_WIDE_INT) 1 << precision) - 1,
			      precision - HOST_BITS_PER_WIDE_INT > 0
			      ? ((unsigned HOST_WIDE_INT) ~0
				 >> (HOST_BITS_PER_WIDE_INT
				     - (precision - HOST_BITS_PER_WIDE_INT)))
			      : 0);
    }
  else
    {
      min_value
	= build_int_cst_wide (type,
			      (precision - HOST_BITS_PER_WIDE_INT > 0
			       ? 0
			       : (HOST_WIDE_INT) (-1) << (precision - 1)),
			      (((HOST_WIDE_INT) (-1)
				<< (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
				    ? precision - HOST_BITS_PER_WIDE_INT - 1
				    : 0))));
      max_value
	= build_int_cst_wide (type,
			      (precision - HOST_BITS_PER_WIDE_INT > 0
			       ? -1
			       : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
			      (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
			       ? (((HOST_WIDE_INT) 1
				   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
			       : 0));
    }
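  /* Worked example, assuming a 64-bit HOST_WIDE_INT: for PRECISION == 7
     the unsigned branch gives [0, 127] and the signed branch [-64, 63];
     the high word is 0 in each case, except for the signed minimum,
     where it is the sign extension -1.  */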

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
					    /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
					    /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.  Otherwise, if
   VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
   mode meeting these conditions.

   Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
   the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
	       enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
	break;
    }
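
  /* For example, with BITPOS == 17 and BITSIZE == 10 on a target whose
     QImode is 8 bits and HImode 16 bits, QImode is rejected
     (17 % 8 + 10 == 11 > 8) and the loop stops at HImode, since
     17 % 16 + 10 == 11 <= 16.  */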

  if (mode == VOIDmode
      /* It is tempting to omit the following line
	 if STRICT_ALIGNMENT is true.
	 But that is incorrect, since if the bitfield uses part of 3 bytes
	 and we use a 4-byte mode, we could get a spurious segv
	 if the extra 4th byte is past the end of memory.
	 (Though at least one Unix compiler ignores this problem:
	 that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if (SLOW_BYTE_ACCESS && ! volatilep)
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
	   tmode = GET_MODE_WIDER_MODE (tmode))
	{
	  unit = GET_MODE_BITSIZE (tmode);
	  if (bitpos / unit == (bitpos + bitsize - 1) / unit
	      && unit <= BITS_PER_WORD
	      && unit <= MIN (align, BIGGEST_ALIGNMENT)
	      && (largest_mode == VOIDmode
		  || unit <= GET_MODE_BITSIZE (largest_mode)))
	    wide_mode = tmode;
	}

      if (wide_mode != VOIDmode)
	return wide_mode;
    }

  return mode;
}

/* Gets minimal and maximal values for MODE (signed or unsigned depending on
   SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
		 enum machine_mode target_mode,
		 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }
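
  /* E.g. for an 8-bit mode: signed gives [-128, 127], unsigned [0, 255].
     The unsigned maximum is computed as (1 << 7 << 1) - 1 so that no
     shift by the full HOST_WIDE_INT width occurs when SIZE equals
     HOST_BITS_PER_WIDE_INT.  */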

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}

#include "gt-stor-layout.h"