/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment
  = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
   allocated in Pmode, not ptr_mode.  Set only by
   internal_reference_types, which is called only by a front end.  */
static int reference_types_internal = 0;
56
57static void finalize_record_size (record_layout_info);
58static void finalize_type_size (tree);
59static void place_union_field (record_layout_info, tree);
60#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
61static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
62			     HOST_WIDE_INT, tree);
63#endif
64extern void debug_rli (record_layout_info);
65
66/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */
67
68static GTY(()) tree pending_sizes;
69
70/* Show that REFERENCE_TYPES are internal and should be Pmode.  Called only
71   by front end.  */
72
73void
74internal_reference_types (void)
75{
76  reference_types_internal = 1;
77}
78
79/* Get a list of all the objects put on the pending sizes list.  */
80
81tree
82get_pending_sizes (void)
83{
84  tree chain = pending_sizes;
85
86  pending_sizes = 0;
87  return chain;
88}
89
90/* Add EXPR to the pending sizes list.  */
91
92void
93put_pending_size (tree expr)
94{
95  /* Strip any simple arithmetic from EXPR to see if it has an underlying
96     SAVE_EXPR.  */
97  expr = skip_simple_arithmetic (expr);
98
99  if (TREE_CODE (expr) == SAVE_EXPR)
100    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
101}
102
103/* Put a chain of objects into the pending sizes list, which must be
104   empty.  */
105
106void
107put_pending_sizes (tree chain)
108{
109  gcc_assert (!pending_sizes);
110  pending_sizes = chain;
111}
112
113/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
114   to serve as the actual size-expression for a type or decl.  */
115
116tree
117variable_size (tree size)
118{
119  tree save;
120
121  /* If the language-processor is to take responsibility for variable-sized
122     items (e.g., languages which have elaboration procedures like Ada),
123     just return SIZE unchanged.  Likewise for self-referential sizes and
124     constant sizes.  */
125  if (TREE_CONSTANT (size)
126      || lang_hooks.decls.global_bindings_p () < 0
127      || CONTAINS_PLACEHOLDER_P (size))
128    return size;
129
130  size = save_expr (size);
131
132  /* If an array with a variable number of elements is declared, and
133     the elements require destruction, we will emit a cleanup for the
134     array.  That cleanup is run both on normal exit from the block
135     and in the exception-handler for the block.  Normally, when code
136     is used in both ordinary code and in an exception handler it is
137     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
138     not wish to do that here; the array-size is the same in both
139     places.  */
140  save = skip_simple_arithmetic (size);
141
142  if (cfun && cfun->x_dont_save_pending_sizes_p)
143    /* The front-end doesn't want us to keep a list of the expressions
144       that determine sizes for variable size objects.  Trust it.  */
145    return size;
146
147  if (lang_hooks.decls.global_bindings_p ())
148    {
149      if (TREE_CONSTANT (size))
150	error ("type size can%'t be explicitly evaluated");
151      else
152	error ("variable-size type declared outside of any function");
153
154      return size_one_node;
155    }
156
157  put_pending_size (save);
158
159  return size;
160}
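
/* For example, given a C99 variable-length array declared inside a
   function:

     void f (int n) { char a[n]; }

   the array type's TYPE_SIZE is the non-constant expression n * 8
   (in bits, assuming BITS_PER_UNIT == 8).  variable_size wraps it in
   a SAVE_EXPR so the size is computed only once, and records that
   SAVE_EXPR on the pending sizes list via put_pending_size.  */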

#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class CLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider than
   MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class class, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
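
/* For instance, on a typical target with 8-bit QImode through 64-bit
   DImode, mode_for_size (32, MODE_INT, 0) yields SImode, while
   mode_for_size (24, MODE_INT, 0) yields BLKmode because no integer
   mode there has exactly 24 value bits.  (Illustrative; the available
   modes are target-defined.)  */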

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, class, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class class)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
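
/* Continuing the example above: where mode_for_size (24, MODE_INT, 0)
   fails with BLKmode, smallest_mode_for_size (24, MODE_INT) would
   return SImode, the narrowest mode with at least 24 value bits on
   such a target.  */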

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
	break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
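
/* E.g., int_mode_for_mode (SFmode) is SImode on a target where both
   are 32 bits wide, and int_mode_for_mode (DFmode) is likewise DImode.
   (Illustrative; actual mode widths are target-defined.)  */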

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT,
	      MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}


/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;

  if (code == CONST_DECL)
    return;
  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change;
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
					    bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
#endif
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode
	     and occupying a complete byte or bytes on proper boundary.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
	    {
	      enum machine_mode xmode
		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);

	      if (xmode != BLKmode
		  && (known_align == 0
		      || known_align >= GET_MODE_ALIGNMENT (xmode)))
		{
		  DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
					   DECL_ALIGN (decl));
		  DECL_MODE (decl) = xmode;
		  DECL_BIT_FIELD (decl) = 0;
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is of variable size, we can't misalign it since we
	 have no way to make a temporary to align the result.  But this
	 isn't an issue if the decl is not addressable.  Likewise if it
	 is of unknown size.

	 Note that do_type_align may set DECL_USER_ALIGN, so we need to
	 check old_user_align instead.  */
      if (packed_p
	  && !old_user_align
	  && (DECL_NONADDRESSABLE_P (decl)
	      || DECL_SIZE_UNIT (decl) == 0
	      || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (e.g. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  DECL_ALIGN (decl)
	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
	}

      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
	  && compare_tree_int (size, larger_than_size) > 0)
	{
	  int size_as_int = TREE_INT_CST_LOW (size);

	  if (compare_tree_int (size, size_as_int) == 0)
	    warning (0, "size of %q+D is %d bytes", decl, size_as_int);
	  else
	    warning (0, "size of %q+D is larger than %wd bytes",
		     decl, larger_than_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
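
/* As an illustration of the bit-field handling above (a sketch; the
   outcome is target-dependent), given

     struct s { unsigned int f : 8; };

   laying out F with KNOWN_ALIGN == 0 finds an 8-bit integer mode for
   its DECL_SIZE, so DECL_MODE (F) becomes QImode and DECL_BIT_FIELD is
   cleared: the field can be accessed as an ordinary byte.  For
   `unsigned int f : 3' no integer mode has exactly 3 bits, so the
   field keeps DECL_BIT_FIELD set and is accessed with shifts and
   masks.  */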

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Hook for a front-end function that can modify the record layout as needed
   immediately before it is finalized.  */

static void (*lang_adjust_rli) (record_layout_info) = 0;

void
set_lang_adjust_rli (void (*f) (record_layout_info))
{
  lang_adjust_rli = f;
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    rli->record_align = MAX (rli->record_align,
			     (unsigned) STRUCTURE_SIZE_BOUNDARY);
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
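
/* The typical calling sequence, as used by layout_type for RECORD_TYPE
   and friends (a sketch):

     record_layout_info rli = start_record_layout (t);
     for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
       place_field (rli, field);
     finish_record_layout (rli, 1);

   A front end may call set_lang_adjust_rli beforehand to tweak the
   layout just before it is finalized.  */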

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
		     size_binop (MULT_EXPR,
				 fold_convert (bitsizetype, offset),
				 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
		     fold_convert (sizetype,
				   size_binop (TRUNC_DIV_EXPR, bitpos,
					       bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
	      tree pos)
{
  *poffset = size_binop (MULT_EXPR,
			 fold_convert (sizetype,
				       size_binop (FLOOR_DIV_EXPR, pos,
						   bitsize_int (off_align))),
			 size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
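
/* A worked example, assuming BITS_PER_UNIT == 8: with OFFSET == 3
   bytes and BITPOS == 5 bits, bit_from_pos yields 3 * 8 + 5 == 29 and
   byte_from_pos yields 3 + 5 / 8 == 3.  In the other direction,
   pos_from_bit with OFF_ALIGN == 32 and POS == 29 produces an offset
   of (29 / 32) * 4 == 0 bytes and a bit position of 29 % 32 == 29.  */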

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
				      bitsize_int (off_align));

      *poffset
	= size_binop (PLUS_EXPR, *poffset,
		      size_binop (MULT_EXPR,
				  fold_convert (sizetype, extra_aligns),
				  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
	= size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
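
/* For instance, with OFF_ALIGN == 32 and BITS_PER_UNIT == 8, a
   position of *POFFSET == 2 and *PBITPOS == 70 normalizes to
   *POFFSET == 2 + (70 / 32) * 4 == 10 and *PBITPOS == 70 % 32 == 6.  */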

/* Print debugging information about the information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n",
	     rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
	  || (!integer_zerop (DECL_SIZE (field))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  type_align = MAX (type_align, desired_align);
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

	  /* Targets might choose to handle unnamed and hence possibly
	     zero-width bitfields.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK, return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype,
			       DECL_QUALIFIER (field),
			       DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
	  > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
	     / align));
}
#endif
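
/* For example, a 6-bit field at BYTE_OFFSET 3 and BIT_OFFSET 5 with
   ALIGN 32 and a 32-bit TYPE (BITS_PER_UNIT == 8): OFFSET is
   3 * 8 + 5 == 29, still 29 after the reduction mod 32, and
   (29 + 6 + 31) / 32 == 2 exceeds 32 / 32 == 1, so the field would
   straddle two alignment units and the caller advances it to the
   next boundary.  */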

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      rli->pending_statics = tree_cons (NULL_TREE, field,
					rli->pending_statics);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
		   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
		   * (tree_low_cst (rli->offset, 1)
		      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      else
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have the alignment it needs by virtue
     of the fields that precede it and the record's own alignment?
     We already align ms_struct fields, so don't re-align them.  */
  if (known_align < desired_align
      && !targetm.ms_bitfield_layout_p (rli->t))
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to a multiple of the field alignment.  */

      warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && ! DECL_PACKED (field)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if it's long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields
	are packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type
     alignment, to determine alignment, since that matches the
     documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* rli->prev_field, if set, is a bitfield.  */
      if (rli->prev_field)
	{
	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run". (Note: it's nonzero declared
	     size, but equal type sizes!) (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.) */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && host_integerp (DECL_SIZE (rli->prev_field), 0)
	      && host_integerp (TYPE_SIZE (type), 0)
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!) */
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

		  /* Out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		rli->remaining_in_alignment -= bitsize;
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  normalize_rli (rli);
	}

      /* If we're starting a new run of same size type bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield and the type sizes differ, or if there
	 wasn't a prior bitfield and the size of the current field is
	 nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* When the current field is not a bitfield, we could be seeing
	     a flex array (with no DECL_SIZE).  Since we won't be using
	     remaining_in_alignment until we see a bitfield (and come by
	     here again), we just skip calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
	      && host_integerp (DECL_SIZE (field), 0))
	    {
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
	      HOST_WIDE_INT typesize
		= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
		    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
		       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store / extract bit field operations will check the alignment of
     the record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_CONSTANT_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
	 pad the struct out to the full length of the last type.  */
      if ((TREE_CHAIN (field) == NULL
	   || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
	  && DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
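
/* To illustrate the PCC-compatible rule above (a sketch; assumes a
   32-bit int and BITS_PER_UNIT == 8):

     struct s { int a : 20; int b : 20; };

   After A, the position is bit 20.  Placing B there would span bits
   20..39 and cross a 32-bit unit of its type, so excess_unit_span
   fires, B is pushed to bit 32, and the struct occupies 8 bytes
   rather than packing both fields into 5.  */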

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					  rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  TYPE_PACKED (rli->t) = 0;

	  if (TYPE_NAME (rli->t))
	    {
	      const char *name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
	      else
		name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qs", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qs", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  TYPE_MODE (type) = BLKmode;

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be BLKmode;
     it can't go in a register, unless the member is BLKmode only
     because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
	  || ! host_integerp (bit_position (field), 1)
	  || DECL_SIZE (field) == 0
	  || ! host_integerp (DECL_SIZE (field), 1))
	return;

      /* If this field is the whole struct, remember its mode so
	 that, say, we can put a double in a class into a DF
	 register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
	mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
	 to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
	return;
#endif /* MEMBER_TYPE_FORCES_BLK  */
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE; it
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    TYPE_MODE (type) = mode;
  else
    TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
	 don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      TYPE_MODE (type) = BLKmode;
    }
}
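
/* For example, `struct { double d; }' can end up with DFmode (its lone
   field covers the whole struct), letting such a wrapper live in a
   floating-point register, whereas `struct { char c[3]; }' has no
   3-byte integer mode and stays BLKmode.  (Illustrative; the result is
   subject to MEMBER_TYPE_FORCES_BLK and the STRICT_ALIGNMENT check
   above.)  */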

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
	  || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
	      && TREE_CODE (type) != QUAL_UNION_TYPE
	      && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
	 alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
	{
	  TYPE_ALIGN (type) = mode_align;
	  TYPE_USER_ALIGN (type) = 0;
	}
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
				  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
					TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
	   variant != 0;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  TYPE_SIZE (variant) = size;
	  TYPE_SIZE_UNIT (variant) = size_unit;
	  TYPE_ALIGN (variant) = align;
	  TYPE_USER_ALIGN (variant) = user_align;
	  TYPE_MODE (variant) = mode;
	}
    }
}

/* Do all of the work required to layout the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing false for FREE_P is bad
   practice; that option only exists to support the G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (rli->pending_statics)
    {
      layout_decl (TREE_VALUE (rli->pending_statics), 0);
      rli->pending_statics = TREE_CHAIN (rli->pending_statics);
    }

  /* Clean up.  */
  if (free_p)
    free (rli);
}


/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
		       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = TREE_CHAIN (fields);
      TREE_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
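
/* Typical use (a sketch; the type and field names here are made up):

     tree t = make_node (RECORD_TYPE);
     tree f_lo = build_decl (FIELD_DECL, get_identifier ("lo"),
			     integer_type_node);
     tree f_hi = build_decl (FIELD_DECL, get_identifier ("hi"),
			     integer_type_node);
     TREE_CHAIN (f_hi) = f_lo;
     finish_builtin_struct (t, "__example_pair", f_hi, NULL_TREE);

   Because FIELDS is chained in reverse, passing F_HI first yields a
   laid-out record whose TYPE_FIELDS order is LO, then HI.  */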

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
	 of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
	TYPE_PRECISION (type) = 1; /* default to one bit of precision
				      (one byte of storage).  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
	  && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
	TYPE_UNSIGNED (type) = 1;

      TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
						 MODE_INT);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case REAL_TYPE:
      TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      TYPE_MODE (type)
	= mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
			 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
			  ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
			 0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
	int nunits = TYPE_VECTOR_SUBPARTS (type);
	tree nunits_tree = build_int_cst (NULL_TREE, nunits);
	tree innertype = TREE_TYPE (type);

	/* The number of vector subparts must be a power of two.  */
	gcc_assert (!(nunits & (nunits - 1)));

	/* Find an appropriate mode for the vector type.  */
	if (TYPE_MODE (type) == VOIDmode)
	  {
	    enum machine_mode innermode = TYPE_MODE (innertype);
	    enum machine_mode mode;

	    /* First, look for a supported vector type.  */
	    if (SCALAR_FLOAT_MODE_P (innermode))
	      mode = MIN_MODE_VECTOR_FLOAT;
	    else
	      mode = MIN_MODE_VECTOR_INT;

	    for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
	      if (GET_MODE_NUNITS (mode) == nunits
		  && GET_MODE_INNER (mode) == innermode
		  && targetm.vector_mode_supported_p (mode))
		break;

	    /* For integers, try mapping it to a same-sized scalar mode.  */
	    if (mode == VOIDmode
		&& GET_MODE_CLASS (innermode) == MODE_INT)
	      mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
				    MODE_INT, 0);

	    if (mode == VOIDmode || !have_regs_of_mode[mode])
	      TYPE_MODE (type) = BLKmode;
	    else
	      TYPE_MODE (type) = mode;
	  }

	TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
	TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
						 TYPE_SIZE_UNIT (innertype),
						 nunits_tree, 0);
	TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
					    nunits_tree, 0);

	/* Always naturally align vectors.  This prevents ABI changes
	   depending on whether or not native vector modes are supported.  */
	TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
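	/* For example, a vector of four 32-bit floats is 128 bits wide
	   and hence gets 128-bit alignment here, whether or not the
	   target provides a native V4SF mode.  */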
	break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      TYPE_MODE (type) = VOIDmode;
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
	 but ptrdiff_t must be integral.  */
      TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
	 make it consistent with that.  */
      TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      /* APPLE LOCAL blocks */
    case BLOCK_POINTER_TYPE:
      {
	enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
				   && reference_types_internal)
				  ? Pmode : TYPE_MODE (type));

	int nbits = GET_MODE_BITSIZE (mode);

	TYPE_SIZE (type) = bitsize_int (nbits);
	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
	TYPE_UNSIGNED (type) = 1;
	TYPE_PRECISION (type) = nbits;
      }
      break;

    case ARRAY_TYPE:
      {
	tree index = TYPE_DOMAIN (type);
	tree element = TREE_TYPE (type);

	build_pointer_type (element);

	/* We need to know both bounds in order to compute the size.  */
	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
	    && TYPE_SIZE (element))
	  {
	    tree ub = TYPE_MAX_VALUE (index);
	    tree lb = TYPE_MIN_VALUE (index);
	    tree length;
	    tree element_size;

	    /* The initial subtraction should happen in the original type so
	       that (possible) negative values are handled appropriately.  */
	    length = size_binop (PLUS_EXPR, size_one_node,
				 fold_convert (sizetype,
					       fold_build2 (MINUS_EXPR,
							    TREE_TYPE (lb),
							    ub, lb)));
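	    /* For example, a domain of [0, 9] yields a length of
	       9 - 0 + 1 = 10 elements.  */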

	    /* Special handling for arrays of bits (for Chill).  */
	    element_size = TYPE_SIZE (element);
	    if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
		&& (integer_zerop (TYPE_MAX_VALUE (element))
		    || integer_onep (TYPE_MAX_VALUE (element)))
		&& host_integerp (TYPE_MIN_VALUE (element), 1))
	      {
		HOST_WIDE_INT maxvalue
		  = tree_low_cst (TYPE_MAX_VALUE (element), 1);
		HOST_WIDE_INT minvalue
		  = tree_low_cst (TYPE_MIN_VALUE (element), 1);

		if (maxvalue - minvalue == 1
		    && (maxvalue == 1 || maxvalue == 0))
		  element_size = integer_one_node;
	      }

	    /* If neither bound is a constant and sizetype is signed, make
	       sure the size is never negative.  We should really do this
	       if *either* bound is non-constant, but this is the best
	       compromise between C and Ada.  */
	    if (!TYPE_UNSIGNED (sizetype)
		&& TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
		&& TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
	      length = size_binop (MAX_EXPR, length, size_zero_node);

	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
					   fold_convert (bitsizetype,
							 length));

	    /* If we know the size of the element, calculate the total
	       size directly, rather than do some division thing below.
	       This optimization helps Fortran assumed-size arrays
	       (where the size of the array is determined at runtime)
	       substantially.
	       Note that we can't do this in the case where the size of
	       the elements is one bit since TYPE_SIZE_UNIT cannot be
	       set correctly in that case.  */
	    if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
	      TYPE_SIZE_UNIT (type)
		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
	  }

	/* Now round the alignment and size,
	   using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
	TYPE_ALIGN (type)
	  = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
	TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
	TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
	TYPE_MODE (type) = BLKmode;
	if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
	    && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
	    /* BLKmode elements force BLKmode aggregate;
	       else extract/store fields may lose.  */
	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
	  {
	    /* One-element arrays get the component type's mode.  */
	    if (simple_cst_equal (TYPE_SIZE (type),
				  TYPE_SIZE (TREE_TYPE (type))))
	      TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
	    else
	      TYPE_MODE (type)
		= mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
	    if (TYPE_MODE (type) != BLKmode
		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
	      {
		TYPE_NO_FORCE_BLK (type) = 1;
		TYPE_MODE (type) = BLKmode;
	      }
	  }
	/* When the element size is constant, check that it is at least as
	   large as the element alignment.  */
	if (TYPE_SIZE_UNIT (element)
	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
	       TYPE_ALIGN_UNIT.  */
	    && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element))
	    && !integer_zerop (TYPE_SIZE_UNIT (element))
	    && compare_tree_int (TYPE_SIZE_UNIT (element),
				 TYPE_ALIGN_UNIT (element)) < 0)
	  error ("alignment of array elements is greater than element size");
	break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	record_layout_info rli;

	/* Initialize the layout information.  */
	rli = start_record_layout (type);

	/* If this is a QUAL_UNION_TYPE, we want to process the fields
	   in reverse order when building the COND_EXPR that denotes
	   its size.  We reverse them again later.  */
	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	/* Place all the fields.  */
	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  place_field (rli, field);

	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	if (lang_adjust_rli)
	  (*lang_adjust_rli) (rli);

	/* Finish laying out the record.  */
	finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* If an alias set has been set for this aggregate when it was incomplete,
     force it into alias set 0.
     This is too conservative, but we cannot call record_component_aliases
     here because some frontends still change the aggregates after
     layout_type.  */
  if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
    TYPE_ALIAS_SET (type) = 0;
}
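
/* A sketch (not compiled) of how a front end obtains the layout of a
   ten-element array of ints; the ARRAY_TYPE case above derives the
   ten-element length from the [0, 9] domain.  */
#if 0
{
  tree domain = build_index_type (size_int (9));
  tree array = build_array_type (integer_type_node, domain);

  layout_type (array);
  /* TYPE_SIZE (array) is now ten times TYPE_SIZE (integer_type_node),
     and TYPE_ALIGN (array) matches the element alignment.  */
}
#endif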

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}
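
/* For example, on a target whose QImode is 8 bits wide,
   make_signed_type (8) yields a QImode type with an 8-bit TYPE_SIZE
   and bounds [-128, 127], while make_unsigned_type (8) yields bounds
   [0, 255].  */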

/* Initialize sizetype and bitsizetype to a reasonable and temporary
   value to enable integer types to be created.  */

void
initialize_sizetypes (bool signed_p)
{
  tree t = make_node (INTEGER_TYPE);
  int precision = GET_MODE_BITSIZE (SImode);

  TYPE_MODE (t) = SImode;
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_USER_ALIGN (t) = 0;
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = !signed_p;
  TYPE_SIZE (t) = build_int_cst (t, precision);
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = precision;

  /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE.  */
  set_min_and_max_values_for_integral_type (t, precision, !signed_p);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}
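
/* A front end is expected to call initialize_sizetypes early, create
   its basic integer types, and then call set_sizetype below with the
   type it has chosen for sizes (for C-family languages, the type of
   size_t), replacing the SImode stubs made above.  */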

/* Make sizetype a version of TYPE, and initialize *sizetype
   accordingly.  We do this by overwriting the stub sizetype and
   bitsizetype nodes created by initialize_sizetypes.  This makes sure
   that (a) anything stubby about them no longer exists and (b) any
   INTEGER_CSTs created with such a type remain valid.  */

void
set_sizetype (tree type)
{
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32-bit to a 64-bit host, we are limited to 64-bit
     precision.  */
  int precision = MIN (MIN (oprecision + BITS_PER_UNIT_LOG + 1,
			    MAX_FIXED_MODE_SIZE),
		       2 * HOST_BITS_PER_WIDE_INT);
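  /* For example, a 64-bit sizetype with BITS_PER_UNIT_LOG == 3 asks
     for a 68-bit bitsizetype, unless MAX_FIXED_MODE_SIZE or
     2 * HOST_BITS_PER_WIDE_INT caps it first.  */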
  tree t;

  gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));

  t = build_distinct_type_copy (type);
  /* We do want to use sizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We do want to use bitsizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;

  if (TYPE_UNSIGNED (type))
    {
      fixup_unsigned_type (bitsizetype);
      ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
      TYPE_IS_SIZETYPE (ssizetype) = 1;
      sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
      TYPE_IS_SIZETYPE (sbitsizetype) = 1;
    }
  else
    {
      fixup_signed_type (bitsizetype);
      ssizetype = sizetype;
      sbitsizetype = bitsizetype;
    }

  /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
     it is sign extended in a way consistent with force_fit_type.  */
  if (TYPE_UNSIGNED (type))
    {
      tree orig_max, new_max;

      orig_max = TYPE_MAX_VALUE (sizetype);

      /* Build a new node with the same values, but a different type.  */
      new_max = build_int_cst_wide (sizetype,
				    TREE_INT_CST_LOW (orig_max),
				    TREE_INT_CST_HIGH (orig_max));

      /* Now sign extend it using force_fit_type to ensure
	 consistency.  */
      new_max = force_fit_type (new_max, 0, 0, 0);
      TYPE_MAX_VALUE (sizetype) = new_max;
    }
}

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
					  int precision,
					  bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
	= build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
			      ? -1
			      : ((HOST_WIDE_INT) 1 << precision) - 1,
			      precision - HOST_BITS_PER_WIDE_INT > 0
			      ? ((unsigned HOST_WIDE_INT) ~0
				 >> (HOST_BITS_PER_WIDE_INT
				     - (precision - HOST_BITS_PER_WIDE_INT)))
			      : 0);
    }
  else
    {
      min_value
	= build_int_cst_wide (type,
			      (precision - HOST_BITS_PER_WIDE_INT > 0
			       ? 0
			       : (HOST_WIDE_INT) (-1) << (precision - 1)),
			      (((HOST_WIDE_INT) (-1)
				<< (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
				    ? precision - HOST_BITS_PER_WIDE_INT - 1
				    : 0))));
      max_value
	= build_int_cst_wide (type,
			      (precision - HOST_BITS_PER_WIDE_INT > 0
			       ? -1
			       : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
			      (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
			       ? (((HOST_WIDE_INT) 1
				   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
			       : 0));
    }

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}
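
/* For example, with PRECISION == 7 this sets the bounds to [0, 127]
   when IS_UNSIGNED, and to [-64, 63] otherwise.  */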

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
					    /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
					    /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
   decide which of the above modes should be used.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
	       enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
	break;
    }

  if (mode == VOIDmode
      /* It is tempting to omit the following line
	 if STRICT_ALIGNMENT is true.
	 But that is incorrect, since if the bitfield uses part of 3 bytes
	 and we use a 4-byte mode, we could get a spurious segv
	 if the extra 4th byte is past the end of memory.
	 (Though at least one Unix compiler ignores this problem:
	 that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if ((SLOW_BYTE_ACCESS && ! volatilep)
      || (volatilep && !targetm.narrow_volatile_bitfield ()))
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
	   tmode = GET_MODE_WIDER_MODE (tmode))
	{
	  unit = GET_MODE_BITSIZE (tmode);
	  if (bitpos / unit == (bitpos + bitsize - 1) / unit
	      && unit <= BITS_PER_WORD
	      && unit <= MIN (align, BIGGEST_ALIGNMENT)
	      && (largest_mode == VOIDmode
		  || unit <= GET_MODE_BITSIZE (largest_mode)))
	    wide_mode = tmode;
	}

      if (wide_mode != VOIDmode)
	return wide_mode;
    }

  return mode;
}
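
/* For example, a 5-bit field at bit position 9 in a 32-bit-aligned
   object lies entirely within its byte, so on a target with fast byte
   access and VOLATILEP false this returns QImode; with
   SLOW_BYTE_ACCESS defined it instead returns the widest mode still
   covered by the alignment, SImode on a typical 32-bit target.  */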

/* Get the minimal and maximal values for MODE (signed or unsigned depending
   on SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
		 enum machine_mode target_mode,
		 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}
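
/* For example, for an 8-bit QImode this yields [-128, 127] when SIGN
   is nonzero and [0, 255] otherwise.  */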

#include "gt-stor-layout.h"