/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this software have been released under the following terms:
 *
 * (c) Copyright 1989-1993 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1989-1993 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1989-1993 DIGITAL EQUIPMENT CORPORATION
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 * permission to use, copy, modify, and distribute this file for any
 * purpose is hereby granted without fee, provided that the above
 * copyright notices and this notice appears in all source code copies,
 * and that none of the names of Open Software Foundation, Inc., Hewlett-
 * Packard Company or Digital Equipment Corporation be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission.  Neither Open Software
 * Foundation, Inc., Hewlett-Packard Company nor Digital
 * Equipment Corporation makes any representations about the suitability
 * of this software for any purpose.
 *
 * Copyright (c) 2007, Novell, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Novell Inc. nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 **
 **  NAME:
 **
 **      irepgen.c
 **
 **  FACILITY:
 **
 **      Interface Definition Language (IDL) Compiler
 **
 **  ABSTRACT:
 **
 **  Generates the Intermediate Representation of the IDL interface.
 **
 **  %a%private_begin
 **
 **
 **  %a%private_end
 */

#include <nidl.h>       /* Standard IDL defs */
#include <ast.h>        /* Abstract Syntax Tree defs */
#include <astp.h>       /* AST processing package */
#include <irep.h>       /* Intermediate Rep defs */

/* Necessary forward function declarations */

static void IR_gen_type_rep(
		IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
		AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
		AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
		IR_flags_t          flags       /* [in] IREP flags */
		);

/*
 *  I R _ I N I T _ I N F O
 *
 *  Allocates and initializes an ir_info node for the AST node.
 */
#define IR_INIT_INFO(node_p)\
{\
	/* Create an IREP information node */\
		node_p->ir_info = NEW (IR_info_n_t);\
}

/*
 *  I R _ I N I T _ N O D E
 *
 *  Initializes an AST node for IREP processing.  Allocates and initializes an
 *  ir_info node for the AST node.  Allocates a sentinel tuple to start the
 *  node's data_tups list.  Points the current tuple pointer at the sentinel.
 */
#define IR_INIT_NODE(node_p)\
{\
	/* Create an IREP information node */\
		node_p->ir_info = NEW (IR_info_n_t);\
		\
		/* Create a sentinel tuple to simplify list manipulation */\
		node_p->data_tups = NEW (IR_tup_n_t);\
		node_p->data_tups->opcode = IR_op_noop_k;\
		node_p->data_tups->flags  = 0;\
		node_p->data_tups->next   = NULL;\
		\
		/* Save pointer to tuple in IREP info node */\
		node_p->ir_info->cur_tup_p = node_p->data_tups;\
}
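
/*
 * Illustrative sketch of the list state after IR_INIT_NODE(node_p):
 *
 *   node_p->data_tups ---------> [ IR_op_noop_k sentinel, next = NULL ]
 *   node_p->ir_info->cur_tup_p ----------^
 *
 * Because the sentinel is always present, tuple insertion never has to
 * special-case an empty list; new tuples are simply linked in after the
 * current tuple pointer.
 */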

/*
 *  I R _ c u r _ t u p _ p
 *
 *  Returns a pointer to the most recently created tuple in the current scope.
 */
static IR_tup_n_t *IR_cur_tup_p
(
 IR_scope_ctx_t      *ctx_p      /* [in] Scope context */
)
{
	IR_type_scope_t *type_s_p;

	if (ctx_p->type_scope == 0)
		return ctx_p->param_p->ir_info->cur_tup_p;

	type_s_p = &(ctx_p->type_s_a[ctx_p->type_scope]);
	if (type_s_p->flags & IR_REP_AS)
		return type_s_p->type_p->rep_as_type->ir_info->cur_tup_p;
	if (type_s_p->flags & IR_CS_CHAR)
		return type_s_p->type_p->cs_char_type->ir_info->cur_tup_p;
	return type_s_p->type_p->ir_info->cur_tup_p;
}

/*
 *  I R _ i n s e r t _ i r e p _ t u p
 *
 *  Inserts a tuple after the insertion pointer and updates the insertion
 *  pointer to point at the newly inserted tuple.
 */
static void IR_insert_irep_tup
(
 IR_tup_n_t      *tup_p,         /* [in] Ptr to irep tuple to insert */
 IR_tup_n_t      **p_insert_p    /* [io] Ptr to tuple to insert after */
)
{
	tup_p->next = (*p_insert_p)->next;
	(*p_insert_p)->next = tup_p;
	*p_insert_p = tup_p;
}
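
/*
 * Insertion sketch, assuming a list [A] -> [B] with *p_insert_p == A and a
 * new tuple T:
 *
 *   before:  A -> B          *p_insert_p == A
 *   after:   A -> T -> B     *p_insert_p == T
 *
 * The insertion pointer therefore always trails the most recently inserted
 * tuple, which is what IR_cur_tup_p reports for the current scope.
 */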

/*
 *  I R _ g e n _ i r e p _ t u p
 *
 *  Allocates and initializes a tuple.  Inserts the tuple into a list of
 *  tuples; which list to insert into is determined from the scope context
 *  data passed.  It can be either a list hanging off the current parameter
 *  or a list hanging off a type node which is referenced by the parameter.
 */
static IR_tup_n_t *IR_gen_irep_tup  /* Returns ptr to generated tuple */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 IR_opcode_k_t       opcode      /* [in] irep opcode */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	tup_p = NEW (IR_tup_n_t);
	tup_p->opcode = opcode;
	tup_p->flags  = 0;
	tup_p->next   = NULL;

	/* Insert in parameter or type data_tups */
	if (ctx_p->type_scope == 0)
		IR_insert_irep_tup(tup_p, &ctx_p->param_p->ir_info->cur_tup_p);
	else if (ctx_p->type_s_a[ctx_p->type_scope].flags & IR_REP_AS)
		IR_insert_irep_tup(tup_p, &ctx_p->type_s_a[ctx_p->type_scope].type_p->
				rep_as_type->ir_info->cur_tup_p);
	else if (ctx_p->type_s_a[ctx_p->type_scope].flags & IR_CS_CHAR)
		IR_insert_irep_tup(tup_p, &ctx_p->type_s_a[ctx_p->type_scope].type_p->
				cs_char_type->ir_info->cur_tup_p);
	else
		IR_insert_irep_tup(tup_p, &ctx_p->type_s_a[ctx_p->type_scope].type_p->
				ir_info->cur_tup_p);
	return tup_p;
}

/*
 *  I R _ f r e e _ i r e p _ t u p
 *
 *  Frees a tuple.  Optionally unlinks the tuple from a singly linked list.
 *  If the tuple being freed was the current tuple pointer in the scope
 *  context block, the block is optionally updated to the predecessor tuple.
 *  Use with care: no checks are made for any additional references to the
 *  freed tuple that might still be hanging around.
 */
static void IR_free_irep_tup
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 IR_tup_n_t          *tup_p,     /* [in] Ptr to irep tuple to free */
 IR_tup_n_t          *pred_tup_p /* [io] Non-NULL => Ptr to predecessor */
 /* irep tuple; used to unlink from list  */
)
{
	if (pred_tup_p != NULL)
	{
		if (pred_tup_p->next != tup_p)
			INTERNAL_ERROR("Inconsistency in linked list");
		pred_tup_p->next = tup_p->next;

		/*
		 * If the tuple being freed was the current tuple pointer, update
		 * the current tuple pointer to point to the predecessor.
		 */
		if (ctx_p->type_scope == 0)
		{
			if (ctx_p->param_p->ir_info->cur_tup_p == tup_p)
				ctx_p->param_p->ir_info->cur_tup_p = pred_tup_p;
		}
		else if (ctx_p->type_s_a[ctx_p->type_scope].flags & IR_REP_AS)
		{
			if (ctx_p->type_s_a[ctx_p->type_scope].type_p->
					rep_as_type->ir_info->cur_tup_p == tup_p)
				ctx_p->type_s_a[ctx_p->type_scope].type_p->
					rep_as_type->ir_info->cur_tup_p = pred_tup_p;
		}
		else if (ctx_p->type_s_a[ctx_p->type_scope].flags & IR_CS_CHAR)
		{
			if (ctx_p->type_s_a[ctx_p->type_scope].type_p->
					cs_char_type->ir_info->cur_tup_p == tup_p)
				ctx_p->type_s_a[ctx_p->type_scope].type_p->
					cs_char_type->ir_info->cur_tup_p = pred_tup_p;
		}
		else
		{
			if (ctx_p->type_s_a[ctx_p->type_scope].type_p->
					ir_info->cur_tup_p == tup_p)
				ctx_p->type_s_a[ctx_p->type_scope].type_p->
					ir_info->cur_tup_p = pred_tup_p;
		}
	}
	FREE(tup_p);
}

/*
 *  I R _ g e n _ a l i g n m e n t
 *
 *  Generates an alignment tuple if the passed alignment value is greater
 *  than 1.  Returns the tuple address if a tuple was generated, NULL
 *  otherwise.
 */
static IR_tup_n_t *IR_gen_alignment /* Returns tuple ptr or NULL */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 int                 alignment   /* [in] Required alignment */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	if (alignment <= 1)
		tup_p = NULL;
	else
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_align_k);
		tup_p->arg[IR_ARG_INT].int_val = alignment;
	}
	return tup_p;
}

/*
 *  I R _ p a r a m _ n u m
 *
 *  Returns the parameter number of a parameter.  Operation parameters are
 *  numbered starting with 1; the function result is parameter 0.
 */
static unsigned long IR_param_num   /* Returns parameter number */
(
 AST_parameter_n_t   *lookup_p   /* [in] Ptr to AST parameter node */
)
{
	AST_operation_n_t   *oper_p;    /* Ptr to AST operation node */
	AST_parameter_n_t   *param_p;   /* Ptr to a parameter node to test */
	unsigned long       param_num;  /* Parameter number */

	oper_p = lookup_p->uplink;
	param_num = 0;

	for (param_p = oper_p->parameters; param_p != NULL; param_p = param_p->next)
	{
		param_num++;
		if (param_p == lookup_p)
			return param_num;
	}

	/* Not in the parameter list; the operation result is parameter 0 */
	if (lookup_p == oper_p->result)
		return 0;

	INTERNAL_ERROR("Parameter not found in operation parameter list");
	return 0;
}
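
/*
 * Numbering example (illustrative IDL, not taken from any real interface):
 *
 *   long op([in] long a, [in, out] long *b);
 *
 * yields IR_param_num(a) == 1, IR_param_num(b) == 2, and 0 for the operation
 * result.
 */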

/*
 *  I R _ f i e l d _ n u m
 *
 *  Returns the field number of a field.  Structure fields in a non-nested
 *  structure are numbered starting with 1.  Nested structure fields are
 *  numbered consecutively, starting with the number of the last field before
 *  the nested structure plus one, or with 1 if the nested structure is the
 *  first field.
 */
static unsigned long IR_field_num   /* Returns field number */
(
 IR_scope_ctx_t  *ctx_p ATTRIBUTE_UNUSED,         /* [in] Scope context */
 AST_field_n_t   *attr_field_p,  /* [in] Field with a field attribute */
 AST_field_n_t   *lookup_p       /* [in] Field referenced by attribute */
)
{
	AST_field_n_t       *field_p;   /* Ptr to a field node to test */
	unsigned long       field_num;  /* Field number */

	/*
	 * First scan forward from the current field for the reference.  This
	 * assures that if there are multiple instances of the same substruct,
	 * we assign the field number that goes with the current instance.
	 */
	field_num = attr_field_p->ir_info->id_num;
	for (field_p = attr_field_p->next; field_p != NULL; field_p = field_p->next)
	{
		if (field_p->ir_info == NULL)
			IR_INIT_INFO(field_p);

		field_num++;
		field_p->ir_info->id_num = field_num;
		if (field_p == lookup_p)
			return field_num;
	}

	/*
	 * Not found forward in the struct, so it must be a backward reference.  It
	 * therefore follows that a field number has already been assigned to the
	 * referenced field, so we can just pick up that field number.
	 */
	return lookup_p->ir_info->id_num;
}
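
/*
 * Numbering example (illustrative): given
 *
 *   struct { long a; struct { long b; long c; } s; long d; }
 *
 * the numbered fields are a=1, b=2, c=3, d=4; the nested struct itself gets
 * no number and its fields simply continue the enclosing numbering.
 */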

/*
 *  I R _ g e n _ s t r u c t _ t y p e _ r e p
 *
 *  Generates tuples for a non-nested or nested structure definition.
 *  A structure definition is only done once, at the first reference, and
 *  subsequent references are to the already completed definition.
 */
static void IR_gen_struct_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST struct type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 unsigned long   *field_num_p,   /* [io] Starting field number */
 boolean         *has_nf_cs_array/* [io] TRUE => struct or a nested struct */
 /* has non-fixed array of [cs_char] field */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */
	IR_tup_n_t          *cs_tup_p = NULL;  /* Ptr to codeset shadow tuple */
	IR_tup_n_t          *pred_tup_p = NULL;/* Saved tuple pointer */
	IR_tup_n_t          *info_tup_p = NULL;/* Ptr to conformance info tuple */
	IR_tup_n_t          *last_tup_p = NULL;/* Ptr to tup before last field tuples */
	AST_structure_n_t   *struct_p;  /* Ptr to AST structure node */
	AST_field_n_t       *field_p;   /* Ptr to AST field node */
	AST_field_n_t   *last_field_p = NULL;  /* Ptr to last field in struct */

	/*
	 * Generate 'struct begin' tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_struct_begin_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
	IR_process_tup(ctx_p, tup_p);

	/*
	 * Generate 'conformance info' tuple if outermost conformant struct
	 * and set conformant flag on 'struct begin' tuple.
	 */
	if (AST_CONFORMANT_SET(type_p) && IR_in_struct(ctx_p) == 1)
	{
		tup_p->flags |= IR_CONFORMANT;
		tup_p = info_tup_p = IR_gen_irep_tup(ctx_p, IR_op_conformant_info_k);
	}

	/*
	 * If non-nested struct, generate a placeholder tuple in case the struct
	 * or any nested struct has a non-fixed array of [cs_char].
	 */
	if (IR_in_struct(ctx_p) == 1)
	{
		pred_tup_p = tup_p;
		cs_tup_p = IR_gen_irep_tup(ctx_p, IR_op_noop_k);
	}
	if (FE_TEST(type_p->fe_info->flags, FE_HAS_NF_CS_ARRAY))
		*has_nf_cs_array = TRUE;

	/*
	 * If non-nested struct, generate alignment tuple if necessary since
	 * the structure definition can be referenced at any alignment.
	 */
	if (!AST_UNALIGN_SET(type_p) && IR_in_struct(ctx_p) == 1)
		IR_gen_alignment(ctx_p, type_p->alignment_size);

	/*
	 * Process each structure field.
	 */
	struct_p = type_p->type_structure.structure;

	for (field_p = struct_p->fields; field_p != NULL; field_p = field_p->next)
	{
		/*
		 * If not done yet, set up irep private info for field.  A field number
		 * is stored here for fields that are not themselves structures.
		 */
		if (field_p->ir_info == NULL)
			IR_INIT_INFO(field_p);
		/*
		 * Save field node address and predecessor of field's first tuple so
		 * that if the struct is conformant we can later locate information
		 * to patch the 'conformance info' tuple created above.
		 */
		last_field_p = field_p;
		last_tup_p   = IR_cur_tup_p(ctx_p);

		/*
		 * Recurse to generate tuples for field data type.
		 */
		if (field_p->type->kind != AST_structure_k
				||  field_p->type->xmit_as_type != NULL
				||  field_p->type->rep_as_type != NULL
				||  field_p->type->cs_char_type != NULL)
		{
			(*field_num_p)++;
			field_p->ir_info->id_num = *field_num_p;
			IR_gen_type_rep(ctx_p, field_p->type, (AST_instance_n_t *)field_p,
					0);
		}
		else
		{
			/* First generate alignment for nested struct if needed */
			if (!AST_UNALIGN_SET(field_p->type))
				IR_gen_alignment(ctx_p, field_p->type->alignment_size);

			/* Recurse to generate nested struct tuples */
			IR_gen_struct_type_rep(ctx_p, field_p->type,
					(AST_instance_n_t *)field_p, field_num_p, has_nf_cs_array);
		}
	}

	/*
	 * Patch 'conformance info' tuple to point at 'conformant array' tuple.
	 */
	if (AST_CONFORMANT_SET(type_p) && IR_in_struct(ctx_p) == 1)
	{
		while (last_tup_p != NULL
				&& last_tup_p->opcode != IR_op_conformant_array_k
				&& last_tup_p->opcode != IR_op_open_array_k)
			last_tup_p = last_tup_p->next;

		assert(last_tup_p != NULL);

		/*
		 * If array of array, skip to next conformant or open array tuple so
		 * that 'conformance info' tuple points at the flat, not full, rep.
		 */
		if (last_field_p->type->type_structure.array->element_type->kind
				== AST_array_k)
		{
			last_tup_p = last_tup_p->next;
			while (last_tup_p != NULL
					&& last_tup_p->opcode != IR_op_conformant_array_k
					&& last_tup_p->opcode != IR_op_open_array_k)
				last_tup_p = last_tup_p->next;

			assert(last_tup_p != NULL);
		}
		assert(info_tup_p != NULL);
		info_tup_p->arg[IR_ARG_TUP].tup = last_tup_p;
	}

	/*
	 * Process the placeholder tuple created above.  If the struct or any nested
	 * struct has a non-fixed array of [cs_char], turn the tuple into a
	 * 'codeset shadow' tuple, otherwise unlink and delete the tuple.  In the
	 * former case, also create a 'release shadow' tuple before 'struct end'.
	 */
	if (IR_in_struct(ctx_p) == 1)
	{
		if (*has_nf_cs_array)
		{
			assert(cs_tup_p != NULL);
			cs_tup_p->opcode = IR_op_codeset_shadow_k;
			cs_tup_p->arg[IR_ARG_INT].int_val = *field_num_p;
			IR_gen_irep_tup(ctx_p, IR_op_release_shadow_k);
		}
		else
			IR_free_irep_tup(ctx_p, cs_tup_p, pred_tup_p);
	}

	/*
	 * Generate 'struct end' tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_struct_end_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
	IR_process_tup(ctx_p, tup_p);
}
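
/*
 * Tuple-stream sketch for an outermost conformant struct (illustrative IDL):
 *
 *   typedef struct { unsigned32 len; [size_is(len)] byte data[]; } blob_t;
 *
 * yields, approximately:
 *
 *   struct_begin (IR_CONFORMANT)
 *   conformant_info -------------------+  (patched to the array tuple below)
 *   align                              |
 *   ... tuples for the 'len' field ... |
 *   conformant_array <-----------------+
 *   ... bound tuples for 'data' ...
 *   array_end
 *   struct_end
 */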

/*
 *  I R _ g e n _ s t r u c t _ r e p
 *
 *  Generates tuples to reference a non-nested structure and, if the structure
 *  type has not yet been processed, generates tuples for the type.
 */
static void IR_gen_struct_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */
	unsigned long       start_num;  /* Starting field number */
	boolean         has_nf_cs_array;/* TRUE => struct or a nested struct      */
	/* has non-fixed array of [cs_char] field */

	/*
	 * If a tagged reference to a self-pointing type, use the original type node
	 * if one exists.
	 */
	if (AST_SELF_POINTER_SET(type_p) && AST_DEF_AS_TAG_SET(type_p)
			&& type_p->fe_info->original != NULL)
		type_p = type_p->fe_info->original;

	/*
	 * Generate indirect reference tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for struct type if not yet done.
	 */
	if (type_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(type_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the type node's tuple list until another indirect type
		 * reference is generated or the end of this type reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for non-nested struct type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		start_num = 0;
		has_nf_cs_array = FALSE;
		IR_gen_struct_type_rep(ctx_p, type_p, inst_p, &start_num,
				&has_nf_cs_array);
	}

	/*
	 * Propagate irep type flags to parameter where appropriate.
	 */
	if (!AST_IN_SET(ctx_p->param_p)
			&& type_p->ir_info->allocate_ref
			&& !IR_under_pointer(ctx_p))
		ctx_p->param_p->ir_info->allocate_ref = TRUE;
}
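
/*
 * Sharing note (illustrative): if two parameters reference the same struct
 * type, only the first reference flattens the type; the second emits just an
 * IR_op_type_indirect_k tuple pointing at the definition already hanging off
 * the type node, since type_p->ir_info is non-NULL by then.
 */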

/*
 *  I R _ c a s e _ i n f o _ c o m p a r e
 *
 *  Routine called by the sort routine to compare the constants represented by
 *  two case information nodes.  Returns an integer less than or greater than
 *  0 depending on whether the first argument is to be considered less than or
 *  greater than the second.
 *
 *  Assumption: the first argument is never equal to the second argument.
 */
static int IR_case_info_compare
(
 IR_case_info_n_t    *p1,        /* [io] Ptr to first case info node */
 IR_case_info_n_t    *p2         /* [io] Ptr to second case info node */
)
{
	if (p1->value < p2->value)
		return -1;
	else
		return 1;
}

/*
 *  I R _ g e n _ u n i o n _ t y p e _ r e p
 *
 *  Generates tuples for an encapsulated or non-encapsulated union definition.
 *  A union definition is only done once, at the first reference, and
 *  subsequent references are to the already completed definition.
 */
static void IR_gen_union_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST struct type node */
 AST_instance_n_t    *inst_p ATTRIBUTE_UNUSED     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */
	IR_tup_n_t          *beg_tup_p; /* Ptr to 'union begin' tuple */
	AST_disc_union_n_t  *union_p;   /* Ptr to AST union node */
	AST_arm_n_t         *arm_p;     /* Ptr to AST arm node */
	AST_case_label_n_t  *label_p;   /* Ptr to AST case label node */
	AST_constant_n_t    *value_p;   /* Ptr to AST const node for case label */
	AST_arm_n_t         *default_p; /* Ptr to default arm for union */
	IR_case_info_n_t    *array_p;   /* Ptr to array of union case info */
	IR_case_info_n_t  *beg_array_p; /* Ptr to start of case info array */
	IR_case_info_n_t  *end_array_p; /* Ptr to end of case info array */
	long                num_arms;   /* Number of non-default arms in union */

	union_p = type_p->type_structure.disc_union;

	/*
	 * Generate 'union begin' tuple and maintain scope context.
	 */
	beg_tup_p = IR_gen_irep_tup(ctx_p, IR_op_disc_union_begin_k);
	beg_tup_p->arg[IR_ARG_TYPE].type = type_p;
	/* Set 'encapsulated' flag for encapsulated union */
	if (union_p->discrim_name != NAMETABLE_NIL_ID)
		beg_tup_p->flags |= IR_ENCAPSULATED;
	/* Maintain scope */
	IR_process_tup(ctx_p, beg_tup_p);

	/*
	 * For encapsulated union, generate 'encapsulated switch' tuple.
	 */
	if (beg_tup_p->flags & IR_ENCAPSULATED)
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_switch_enc_k);
		tup_p->arg[IR_ARG_NAME].name = union_p->discrim_name;
		tup_p->arg[IR_ARG_TYPE].type = union_p->discrim_type;
	}

	/*
	 * Make a first pass through the union arms to count the number of
	 * non-default arms and place the result in the 'union begin' tuple.
	 */
	num_arms = 0;
	default_p = NULL;
	for (arm_p = union_p->arms; arm_p != NULL; arm_p = arm_p->next)
	{
		/* Count a separate arm for each case label constant. */
		for (label_p = arm_p->labels; label_p != NULL; label_p = label_p->next)
		{
			if (label_p->default_label)
				default_p = arm_p;
			else
				num_arms++;
		}
	}
	beg_tup_p->arg[IR_ARG_INT].int_val = num_arms;

	/*
	 * Allocate an array to hold arm information.  Make a second pass through
	 * union arms to load this array.
	 */
	if (num_arms != 0)
	{
		array_p = NEW_VEC (IR_case_info_n_t, num_arms);
		beg_array_p = array_p;
		end_array_p = array_p;
		for (arm_p = union_p->arms; arm_p != NULL; arm_p = arm_p->next)
		{
			for (label_p = arm_p->labels; label_p != NULL; label_p = label_p->next)
			{
				if (label_p->default_label)
					continue;

				value_p = label_p->value;
				switch (value_p->kind)
				{
					case AST_int_const_k:
						end_array_p->value = (unsigned long)value_p->value.int_val;
						break;
					case AST_char_const_k:
						end_array_p->value = (unsigned long)value_p->value.char_val;
						break;
					case AST_boolean_const_k:
						end_array_p->value = (unsigned long)value_p->value.boolean_val;
						break;
					default:
						INTERNAL_ERROR("Unsupported case label constant kind");
				}
				end_array_p->arm_p   = arm_p;
				end_array_p->label_p = label_p;
				end_array_p++;
			}
		}
		/* Sort the array in ascending order of case value. */
		qsort(array_p, num_arms, sizeof(IR_case_info_n_t),
#if !(defined(mips) && defined(ultrix))
				(int (*)(const void *, const void *))
#endif
				IR_case_info_compare);

		/*
		 * Process the sorted array of case information.
		 */
		while (array_p < end_array_p)
		{
			/* Generate 'case' tuple */
			tup_p = IR_gen_irep_tup(ctx_p, IR_op_case_k);
			if (union_p->discrim_type->kind == AST_boolean_k)
				tup_p->flags |= IR_BOOLEAN;
			tup_p->arg[IR_ARG_INT].int_val = array_p->value;

			/* If not empty arm, recurse to generate tuples for arm data type */
			if (array_p->arm_p->type == NULL)
				tup_p->flags |= IR_VOID;
			else
			{
				IR_gen_type_rep(ctx_p, array_p->arm_p->type,
						(AST_instance_n_t *)array_p->arm_p, 0);
			}
			array_p++;
		}
		FREE(beg_array_p);
	}

	/*
	 * Generate 'default case' tuple and recurse to gen tuples for default type.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_default_k);
	if (default_p == NULL)
		tup_p->arg[IR_ARG_TYPE].type = NULL;
	else
	{
		tup_p->arg[IR_ARG_TYPE].type = default_p->type;
		if (default_p->type == NULL)
			tup_p->flags |= IR_VOID;
		else
		{
			IR_gen_type_rep(ctx_p, default_p->type,
					(AST_instance_n_t *)default_p, 0);
		}
	}

	/*
	 * Generate 'union end' tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_disc_union_end_k);
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INT].int_val = num_arms;
	tup_p->flags = beg_tup_p->flags;
	IR_process_tup(ctx_p, tup_p);
}
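
/*
 * Tuple-stream sketch for an encapsulated union (illustrative IDL):
 *
 *   typedef union switch (long tag) tagged_union {
 *       case 1:  long l;
 *       default: ;
 *   } u_t;
 *
 * yields, approximately:
 *
 *   disc_union_begin (IR_ENCAPSULATED, int_val = 1 non-default arm)
 *   switch_enc (name = tag, type = long)
 *   case (int_val = 1), followed by tuples for 'l'
 *   default (IR_VOID, since the default arm is empty)
 *   disc_union_end
 */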

/*
 *  I R _ g e n _ d i s c _ u n i o n _ r e p
 *
 *  Generates tuples to reference a discriminated union and, if the union
 *  type has not yet been processed, generates tuples for the type.
 */
static void IR_gen_disc_union_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */
	AST_field_attr_n_t  *fattr_p;   /* Ptr to AST field attributes node */

	/*
	 * If a tagged reference to a self-pointing type, use the original type node
	 * if one exists.
	 */
	if (AST_SELF_POINTER_SET(type_p) && AST_DEF_AS_TAG_SET(type_p)
			&& type_p->fe_info->original != NULL)
		type_p = type_p->fe_info->original;

	/*
	 * If union is non-encapsulated, generate a tuple for switch information.
	 */
	if (type_p->type_structure.disc_union->discrim_name == NAMETABLE_NIL_ID)
	{
		fattr_p = inst_p->field_attrs;
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_switch_n_e_k);
		if (IR_under_struct(ctx_p))
		{
			tup_p->arg[IR_ARG_FIELD].field = fattr_p->switch_is->ref.f_ref;
			tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
					(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
		}
		else
		{
			tup_p->arg[IR_ARG_PARAM].param = fattr_p->switch_is->ref.p_ref;
			tup_p->arg[IR_ARG_PFNUM].int_val =
				IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
		}
	}

	/*
	 * Generate indirect reference tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for union type if not yet done.
	 */
	if (type_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(type_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the type node's tuple list until another indirect type
		 * reference is generated or the end of this type reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for union type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		IR_gen_union_type_rep(ctx_p, type_p, inst_p);
	}
}

/*
 *  I R _ g e n _ i n t e r f a c e _ r e p
 *
 *  Generates an IR_op_interface_k tuple.
 */
static void IR_gen_interface_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */

	tup_p = IR_gen_irep_tup(ctx_p, IR_op_interface_k);
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
	tup_p->arg[IR_ARG_INTFC].intfc = type_p->type_structure.interface;
}

/*
 *  I R _ g e n _ p t r _ t u p
 *
 *  Generates an IR_op_*_ptr_k or IR_op_passed_by_ref_k tuple.
 */
static IR_tup_n_t *IR_gen_ptr_tup   /* Returns ptr to generated tuple */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	if (   (inst_p != NULL && AST_UNIQUE_SET(inst_p))
			|| (inst_p == NULL && AST_UNIQUE_SET(type_p)) )
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_unique_ptr_k);
	}
	else if (   (inst_p != NULL && AST_PTR_SET(inst_p))
			|| (inst_p == NULL && AST_PTR_SET(type_p)) )
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_full_ptr_k);
	}
	else if (IR_cur_scope(ctx_p) != IR_SCP_TOPLEVEL)
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_ref_ptr_k);
		/*
		 * [ref] pointers that are not under a full or unique pointer in an
		 * [out]-only parameter require preallocation on server side - set flag.
		 */
		if (ctx_p->type_scope == 0)
		{
			/*
			 * [ref] pointer in non-indirect scope - set 'allocate ref' flag
			 * on parameter if it is [out]-only unless we are under any
			 * non-passed-by-ref pointer.
			 */
			if (!AST_IN_SET(ctx_p->param_p) && !IR_under_pointer(ctx_p))
				ctx_p->param_p->ir_info->allocate_ref = TRUE;
		}
		else
		{
			/*
			 * [ref] pointer in an indirect type scope (must be struct type) -
			 * set 'allocate ref' flag on type.  This must be done irrespective
			 * of whether the current parameter is [out]-only or whether we are
			 * under any non-passed-by-ref pointer, since, for example, a
			 * subsequent [out]-only parameter can reference this same type
			 * which is only flattened once.  When this type is referenced the
			 * flag will be propagated to the parameter if the appropriate
			 * conditions are met.
			 */
			ctx_p->type_s_a[ctx_p->type_scope].type_p->ir_info->allocate_ref
				= TRUE;
		}
	}
	else
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_passed_by_ref_k);

	/*
	 * Conditionally set IR_STRING and/or IR_ARRAYIFIED_PTR flags.
	 */
	if (IR_STRING_ARRAY(type_p, inst_p) || IR_STRINGIFIED(type_p, inst_p))
		tup_p->flags |= IR_STRING;
	if (IR_ARRAYIFIED(type_p, inst_p))
		tup_p->flags |= IR_ARRAYIFIED_PTR;

	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
	return tup_p;
}
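
/*
 * Attribute-to-opcode summary, following directly from the logic above:
 *
 *   [unique] pointer              -> IR_op_unique_ptr_k
 *   [ptr] (full) pointer          -> IR_op_full_ptr_k
 *   [ref] pointer below toplevel  -> IR_op_ref_ptr_k
 *   toplevel [ref] pointer        -> IR_op_passed_by_ref_k
 */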

/*
 *  I R _ g e n _ a r r a y _ t u p
 *
 *  Generates an IR_op_*_array_k tuple.
 */
static IR_tup_n_t *IR_gen_array_tup /* Returns ptr to generated tuple */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 IR_flags_t          flags       /* [in] IREP flags: IR_STRING set iff     */
 /*     object is a [string] array         */
 /*     IR_CS_CHAR iff base type [cs_char] */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	boolean conformant, varying, string;

	conformant  = (AST_CONFORMANT_SET(type_p) != 0);
	string      = ((flags & IR_STRING) != 0);
	varying     = (string || (inst_p != NULL && AST_VARYING_SET(inst_p)));

	if (conformant)
	{
		if (varying)
			tup_p = IR_gen_irep_tup(ctx_p, IR_op_open_array_k);
		else
			tup_p = IR_gen_irep_tup(ctx_p, IR_op_conformant_array_k);
	}
	else
	{
		if (varying)
			tup_p = IR_gen_irep_tup(ctx_p, IR_op_varying_array_k);
		else
			tup_p = IR_gen_irep_tup(ctx_p, IR_op_fixed_array_k);
	}

	tup_p->flags |= (flags & (IR_CS_CHAR|IR_STRING)); /* OR in certain flags */
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
	return tup_p;
}
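
/*
 * Opcode selection summary, following directly from the logic above:
 *
 *                     fixed bounds           conformant
 *   fixed limits      IR_op_fixed_array_k    IR_op_conformant_array_k
 *   varying / string  IR_op_varying_array_k  IR_op_open_array_k
 */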

/*
 *  I R _ b o u n d _ e a r l y
 *
 *  Determines whether the referenced attribute field or parameter occurs
 *  before the instance that uses it, so that in-line correlation checking
 *  can be enabled.
 */
static boolean IR_bound_early
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 unsigned long       attribute_index /* [in] index of field attribute */
)
{
	unsigned long pf_index;

	if (IR_under_struct(ctx_p))
	{
		pf_index = IR_field_num(ctx_p, (AST_field_n_t *)inst_p,
				(AST_field_n_t *)inst_p);
	}
	else
	{
		pf_index = IR_param_num((AST_parameter_n_t *)inst_p);
	}

	return (attribute_index < pf_index);
}
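
/*
 * Example (illustrative): in
 *
 *   void op([in] long len, [in, size_is(len)] byte buf[]);
 *
 * 'len' is parameter 1 and 'buf' is parameter 2, so the correlation is early
 * (1 < 2) and IR_CF_EARLY can be set on the bound tuple, enabling in-line
 * correlation checking.
 */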

/*
 *  I R _ g e n _ b o u n d _ t u p s
 *
 *  Generates a sequence of IR_op_bound_k tuples to describe the bounds
 *  of an array.
 */
static IR_tup_n_t *IR_gen_bound_tups    /* Returns ptr to last tuple gen'd */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST array type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 IR_flags_t          flags       /* [in] IREP flags: IR_STRING set iff     */
 /*     object is a [string] array         */
 /*     IR_CS_CHAR iff base type [cs_char] */
)
{
	IR_tup_n_t          *tup_p = NULL;     /* Ptr to irep tuple and args */
	IR_tup_n_t          *lower_tup_p = NULL;
	AST_array_n_t       *array_p = NULL;   /* Ptr to AST array node */
	AST_array_index_n_t *index_p;   /* Ptr to AST array index node */
	AST_constant_n_t    *const_p;   /* Ptr to AST constant node */
	AST_field_attr_n_t  *fattr_p;   /* Ptr to AST field attributes node */
	int i;
	boolean string;

	string = ((flags & IR_STRING) != 0);
	array_p = type_p->type_structure.array;
	fattr_p = (inst_p == NULL) ? NULL : inst_p->field_attrs;

	/*
	 * For each dimension in array:
	 */
	for (i = 0; i < array_p->index_count; i++)
	{
		index_p = &array_p->index_vec[i];

		/*
		 * Generate IR_op_bound_k for lower bound.  Note: AST array_rep_type for
		 * arrayified ptr can have constant lower bound even if an instance of
		 * it has a min_is attribute, so must test for min_is attribute first.
		 */
		const_p = index_p->lower_bound;
		lower_tup_p = tup_p = IR_gen_irep_tup(ctx_p, IR_op_bound_k);

		if (fattr_p != NULL &&
				fattr_p->min_is_vec != NULL && fattr_p->min_is_vec[i].valid)
		{
			if (fattr_p->min_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->min_is_vec[i].ref.integer;
			}
			else
			{
				/* Pick up the referenced [min_is] field/parameter */
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_min_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->min_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->min_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else
		{
			/* Constant lower bound */
			tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_fixed_k;
			tup_p->arg[IR_ARG_INT].int_val   = const_p->value.int_val;
		}

		/*
		 * Generate IR_op_bound_k for upper bound.  Note: AST array_rep_type for
		 * arrayified ptr always has a dynamic upper bound by definition of
		 * arrayifying, so no need to test for dynamic bound before fixed bound
		 * as with the lower bound above.
		 */
		const_p = index_p->upper_bound;
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_bound_k);

		if (const_p != NULL)
		{
			/* Constant upper bound */
			tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_fixed_k;
			tup_p->arg[IR_ARG_INT].int_val   = const_p->value.int_val;
		}
		else if (fattr_p != NULL &&
				fattr_p->max_is_vec != NULL && fattr_p->max_is_vec[i].valid)
		{
			if (fattr_p->max_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->max_is_vec[i].ref.integer;
			}
			else
			{
				/* Pick up the referenced [max_is] field/parameter */
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_max_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->max_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->max_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else if (fattr_p != NULL &&
				fattr_p->size_is_vec != NULL && fattr_p->size_is_vec[i].valid)
		{
			if (fattr_p->size_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = 0;
				if (lower_tup_p->arg[IR_ARG_BOUND].bound_k == IR_bnd_fixed_k)
					tup_p->arg[IR_ARG_INT].int_val += lower_tup_p->arg[IR_ARG_INT].int_val;
				tup_p->arg[IR_ARG_INT].int_val   += fattr_p->size_is_vec[i].ref.integer - 1;
			}
			else
			{
				/* Pick up the referenced [size_is] field/parameter */
				tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_size_is_k;
				tup_p->flags |= (flags & IR_CS_CHAR); /* OR in certain flags */
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->size_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);

				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->size_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				tup_p->arg[IR_ARG_BOUND_XTRA].byt_val = fattr_p->size_is_vec[i].xtra_opcode;
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else if (string)
		{
			/*
			 * Get here only for conformant string without an explicit max_is or
			 * size_is attribute - upper bound must contain string octet size.
			 */
			tup_p->arg[IR_ARG_BOUND].bound_k = IR_bnd_string_k;
			/*
			 * If base type of string has rep_as we want size of local type;
			 * since not known at compile time, stash type node address instead.
			 */
			if (type_p->type_structure.array->element_type->rep_as_type != NULL)
			{
				tup_p->flags |= IR_REP_AS;
				tup_p->arg[IR_ARG_TYPE2].type =
					type_p->type_structure.array->element_type;
			}
			else
			{
				/* Store octet size */
				tup_p->arg[IR_ARG_INT].int_val =
					type_p->type_structure.array->element_type->ndr_size;
			}
			/* Conformant string in conformant struct requires field number */
			if (AST_CONFORMANT_SET(type_p)
					&& IR_parent_scope(ctx_p) == IR_SCP_STRUCT)
				tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
						(AST_field_n_t *)inst_p, (AST_field_n_t *)inst_p);
		}
		else
		{
			INTERNAL_ERROR("Invalid array bounds");
		}
	}
	return tup_p;
}
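
/*
 * Bound-tuple sketch (illustrative): for [size_is(len)] byte buf[] the loop
 * above emits one IR_op_bound_k pair for the single dimension, roughly:
 *
 *   bound (IR_bnd_fixed_k,   int_val = 0)        lower bound
 *   bound (IR_bnd_size_is_k, pfnum of 'len')     upper bound
 */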

/*
 *  I R _ g e n _ l i m i t _ t u p s
 *
 *  Generates a sequence of IR_op_limit_k tuples to describe the data limits
 *  of an array.
 */
static IR_tup_n_t *IR_gen_limit_tups    /* Returns ptr to last tuple gen'd */
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST array type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 IR_flags_t          flags       /* [in] IREP flags: IR_STRING set iff     */
 /*     object is a [string] array         */
 /*     IR_CS_CHAR iff base type [cs_char] */
)
{
	IR_tup_n_t          *tup_p = NULL;     /* Ptr to irep tuple and args */
	IR_tup_n_t          *lower_tup_p = NULL;
	AST_array_n_t       *array_p;   /* Ptr to AST array node */
	AST_array_index_n_t *index_p;   /* Ptr to AST array index node */
	AST_constant_n_t    *const_p;   /* Ptr to AST constant node */
	AST_field_attr_n_t  *fattr_p;   /* Ptr to AST field attributes node */
	int i;
	boolean string;

	string = ((flags & IR_STRING) != 0);
	array_p = type_p->type_structure.array;
	fattr_p = (inst_p == NULL) ? NULL : inst_p->field_attrs;

#if 0
	printf("IR_gen_limit_tups: called with flags %08x string %d\n", flags, string);
#endif

	/*
	 * For each dimension in array:
	 */
	for (i = 0; i < array_p->index_count; i++)
	{
		index_p = &array_p->index_vec[i];

		/*
		 * Generate IR_op_limit_k for lower data limit.
		 */
		const_p = index_p->lower_bound;
		lower_tup_p = tup_p = IR_gen_irep_tup(ctx_p, IR_op_limit_k);

		if (fattr_p != NULL && fattr_p->first_is_vec != NULL
				&& fattr_p->first_is_vec[i].valid)
		{
			if (fattr_p->first_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->first_is_vec[i].ref.integer;
			}
			else
			{
				/* Pick up the referenced [first_is] field/parameter */
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_first_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->first_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->first_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		/*
		 * Note: AST array_rep_type for arrayified ptr can have constant lower
		 * bound even if an instance of it has a min_is attribute, so must test
		 * for min_is attribute first.
		 */
		else if (fattr_p != NULL && fattr_p->min_is_vec != NULL
				&& fattr_p->min_is_vec[i].valid)
		{
			if (fattr_p->min_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->min_is_vec[i].ref.integer;
			}
			else
			{
				/*
				 * No lower data limit but also not a fixed bound, so dummy up a
				 * first_is reference to point to the min_is variable.
				 */
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_first_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->min_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->min_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else
		{
			/* Fixed lower data limit */
			tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
			tup_p->arg[IR_ARG_INT].int_val   = const_p->value.int_val;
		}

		/*
		 * Generate IR_op_limit_k for upper data limit.
		 */
		const_p = index_p->upper_bound;
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_limit_k);

		if (string)
		{
			/*
			 * Normally, the upper data limit is computed from the string length
			 * at runtime, so the upper data limit must contain the string octet
			 * size.  If in the flat array rep of an array of string, the limit
			 * should have a dummy value of 0 to simplify Interpreter logic for
			 * A,B pairs.
			 */
			if (ctx_p->in_flat_rep)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val = 0;
			}
			else
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_string_k;
				/*
				 * If base type of string has rep_as we want size of local type;
				 * since not known at compile time, stash type node address instead.
				 */
				if (type_p->type_structure.array->element_type->rep_as_type != NULL)
				{
					tup_p->flags |= IR_REP_AS;
					tup_p->arg[IR_ARG_TYPE2].type =
						type_p->type_structure.array->element_type;
				}
				else
				{
					/* Store octet size */
					tup_p->arg[IR_ARG_INT].int_val =
						type_p->type_structure.array->element_type->ndr_size;
				}
			}
		}
		else if (fattr_p != NULL && fattr_p->last_is_vec != NULL
				&& fattr_p->last_is_vec[i].valid)
		{
			if (fattr_p->last_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->last_is_vec[i].ref.integer;
			}
			else
			{
				/* Pick up the referenced [last_is] field/parameter */
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_last_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->last_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->last_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else if (fattr_p != NULL && fattr_p->length_is_vec != NULL
				&& fattr_p->length_is_vec[i].valid)
		{
			if (fattr_p->length_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = 0;
				if (lower_tup_p->arg[IR_ARG_LIMIT].limit_k == IR_lim_fixed_k)
					tup_p->arg[IR_ARG_INT].int_val += lower_tup_p->arg[IR_ARG_INT].int_val;
				tup_p->arg[IR_ARG_INT].int_val  += fattr_p->length_is_vec[i].ref.integer - 1;
			}
			else
			{
				/* Pick up the referenced [length_is] field/parameter */
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_length_is_k;
				tup_p->flags |= (flags & IR_CS_CHAR); /* OR in certain flags */
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->length_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->length_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				tup_p->arg[IR_ARG_BOUND_XTRA].byt_val = fattr_p->length_is_vec[i].xtra_opcode;
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}

		}
		else if (const_p == NULL
				&& fattr_p != NULL
				&& fattr_p->max_is_vec != NULL
				&& fattr_p->max_is_vec[i].valid)
		{
			if (fattr_p->max_is_vec[i].constant)
			{
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
				tup_p->arg[IR_ARG_INT].int_val   = fattr_p->max_is_vec[i].ref.integer;
			}
			else
			{
				/*
				 * No upper data limit but also not a fixed bound, so dummy up a
				 * last_is reference to point to the max_is variable.
				 */
				tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_last_is_k;
				if (IR_under_struct(ctx_p))
				{
					tup_p->arg[IR_ARG_FIELD].field = fattr_p->max_is_vec[i].ref.f_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val = IR_field_num(ctx_p,
							(AST_field_n_t *)inst_p, tup_p->arg[IR_ARG_FIELD].field);
				}
				else
				{
					tup_p->arg[IR_ARG_PARAM].param = fattr_p->max_is_vec[i].ref.p_ref;
					tup_p->arg[IR_ARG_PFNUM].int_val =
						IR_param_num(tup_p->arg[IR_ARG_PARAM].param);
				}
				/* Set the early correlation flag if referenced field occurs first */
				if (IR_bound_early(ctx_p, inst_p, tup_p->arg[IR_ARG_PFNUM].int_val))
					tup_p->flags |= IR_CF_EARLY;
			}
		}
		else if (const_p == NULL && fattr_p != NULL && fattr_p->size_is_vec != NULL
				&& fattr_p->size_is_vec[i].valid)
		{
			/*
			 * No upper data limit and upper bound is a size_is reference.
			 * Generate tuple that says upper data limit must be computed.
			 */
			tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_upper_conf_k;
		}
		else
		{
			/* Fixed upper data limit */
			assert(tup_p != NULL);
			assert(const_p != NULL);
			tup_p->arg[IR_ARG_LIMIT].limit_k = IR_lim_fixed_k;
			tup_p->arg[IR_ARG_INT].int_val   = const_p->value.int_val;
		}
	}
	return tup_p;
}
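
/*
 * Limit-tuple sketch (illustrative): for [length_is(n)] long arr[8] the loop
 * above emits one IR_op_limit_k pair for the single dimension, roughly:
 *
 *   limit (IR_lim_fixed_k,     int_val = 0)      lower data limit
 *   limit (IR_lim_length_is_k, pfnum of 'n')     upper data limit
 */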

/*
 *  I R _ g e n _ f l a t _ a r r a y _ r e p
 *
 *  Generates tuples for the flattened (multidimensional array) representation
 *  of an array of array [of array]... reference.
 */
static void IR_gen_flat_array_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_type_n_t        *atype_p;   /* Ptr to an array type */
	AST_type_n_t        *btype_p;   /* Ptr to non-array base type */
	AST_array_n_t       *array_p;   /* Ptr to AST array node */
	unsigned short      dim;        /* Total number of dimensions */

	dim = 0;

	/*
	 * Compute the total number of dimensions and the non-array base type.
	 * Array of string array: The string array is considered the base type but
	 * the string dimension is still included in the bound (or limit) tuples.
	 */
	for (atype_p = type_p;
			atype_p->kind == AST_array_k
			&& !AST_STRING_SET(atype_p) && !AST_STRING0_SET(atype_p);
			atype_p = atype_p->type_structure.array->element_type)
	{
		array_p = atype_p->type_structure.array;
		dim += array_p->index_count;
	}
	btype_p = atype_p;
	if (btype_p->kind == AST_array_k)
		dim++;

	/*
	 * Generate IR_op_*_array_k tuple and maintain scope context.
	 */
	ctx_p->in_flat_rep = TRUE;
	tup_p = IR_gen_array_tup(ctx_p, type_p, inst_p, 0);
	IR_process_tup(ctx_p, tup_p);

	/*
	 * Generate IR_op_array_bounds_k.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_array_bounds_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup     = NULL;
	tup_p->arg[IR_ARG_TYPE].type   = type_p;
	tup_p->arg[IR_ARG_INT].int_val = dim;

	/*
	 * Generate IR_op_bound_k pair for each dimension.
	 * If array is varying, generate IR_op_limit_k pair for each dimension.
	 */
	for (atype_p = type_p;
			atype_p->kind == AST_array_k;
			atype_p = atype_p->type_structure.array->element_type)
	{
		/*
		 * After the toplevel array, pass a null instance ptr since instance
		 * attributes can only apply to the toplevel array.
		 */
		IR_gen_bound_tups(ctx_p, atype_p, (atype_p == type_p) ? inst_p : NULL,
				(AST_STRING_SET(atype_p) || AST_STRING0_SET(atype_p)) ?
				IR_STRING : 0);
	}

	if (inst_p != NULL && AST_VARYING_SET(inst_p))
	{
		for (atype_p = type_p;
				atype_p->kind == AST_array_k;
				atype_p = atype_p->type_structure.array->element_type)
		{
			/*
			 * After the toplevel array, pass a null instance ptr since instance
			 * attributes can only apply to the toplevel array.
			 */
			IR_flags_t flags;

			if (AST_STRING_SET(atype_p) || AST_STRING0_SET(atype_p))
				flags = IR_STRING;
			else
				flags = 0;
			IR_gen_limit_tups(ctx_p, atype_p,
					(atype_p == type_p) ? inst_p : NULL,
					flags);
		}
	}

	/*
	 * Recurse to generate tuples for the non-array base element type.
	 */
	ctx_p->in_flat_rep = FALSE;
	IR_gen_type_rep(ctx_p, btype_p, NULL, 0);

	/*
	 * Generate IR_op_array_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_array_end_k);
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	IR_process_tup(ctx_p, tup_p);
}
1620
1621/*
1622*  I R _ g e n _ a r r a y _ r e p
1623 *
1624*  Generates tuples for an array reference.
1625*/
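/*
 * Illustrative sketch (assumed IDL, not from this source tree): a conformant
 * array such as
 *     [size_is(len)] long vals[];
 * gets an open upper bound that must be computed at runtime, while a varying
 * array such as
 *     [length_is(used)] long vals[100];
 * additionally gets IR_op_limit_k tuples describing its data limits.
 */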
static void IR_gen_array_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 IR_flags_t          flags       /* [in] IREP flags: IR_STRING set iff */
                                 /*      object is a [string] array    */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_array_n_t       *array_p;   /* Ptr to AST array node */
	AST_type_n_t        *btype_p;   /* Ptr to AST array base type node */
	IR_tup_n_t *full_tup_p = NULL, *flat_tup_p; /* Saved tuple ptrs */
	boolean     array_of_array;     /* TRUE => toplevel of array of array... */
	boolean     toplevel_array;     /* TRUE => array parameter */

	array_p = type_p->type_structure.array;
	btype_p = array_p->element_type;

	/*
	 * If base type is [cs_char], set flag used in array tuples.
	 */
	if (btype_p->cs_char_type != NULL)
		flags |= IR_CS_CHAR;

	/*
	 * If toplevel array, generate IR_op_passed_by_ref_k or IR_op_*_ptr_k tuple
	 * since arrays are implicitly passed by reference.
	 */
	if (IR_cur_scope(ctx_p) == IR_SCP_TOPLEVEL)
	{
		tup_p = IR_gen_ptr_tup(ctx_p, type_p, inst_p);
		IR_process_tup(ctx_p, tup_p);
		toplevel_array = TRUE;
	}
	else
		toplevel_array = FALSE;

	/*
	 * If at toplevel of an array of array, generate IR_op_full_array_k tuple.
	 */
	array_of_array = (IR_cur_scope(ctx_p) != IR_SCP_ARRAY
			&& btype_p->kind == AST_array_k);
	if (array_of_array)
		full_tup_p = IR_gen_irep_tup(ctx_p, IR_op_full_array_k);

	/*
	 * Generate IR_op_*_array_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_array_tup(ctx_p, type_p, inst_p, flags);
	IR_process_tup(ctx_p, tup_p);

	/*
	 * Generate block of IR_op_declare_k helper variables into operation tups.
	 */

	/*
	 * Generate IR_op_array_bounds_k.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_array_bounds_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup     = NULL;
	tup_p->arg[IR_ARG_TYPE].type   = type_p;
	tup_p->arg[IR_ARG_INT].int_val = array_p->index_count;

	/*
	 * Generate IR_op_bound_k pair for each dimension.
	 * If array is varying, generate IR_op_limit_k pair for each dimension.
	 */
	IR_gen_bound_tups(ctx_p, type_p, inst_p, flags);

	if ((flags & IR_STRING) || (inst_p != NULL && AST_VARYING_SET(inst_p)))
		IR_gen_limit_tups(ctx_p, type_p, inst_p, flags);

	/*
	 * Recurse to generate tuples for the array element type.
	 */
	IR_gen_type_rep(ctx_p, btype_p, NULL, 0);

	/*
	 * Generate IR_op_array_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_array_end_k);
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	IR_process_tup(ctx_p, tup_p);

	/*
	 * If at the toplevel of an array of array construct,
	 * generate the fully flattened representation.
	 */
	if (array_of_array)
	{
		/*
		 * Generate IR_op_flat_array_k tuple.
		 * Generate flattened representation of the array of array.
		 */
		flat_tup_p = IR_gen_irep_tup(ctx_p, IR_op_flat_array_k);
		IR_gen_flat_array_rep(ctx_p, type_p, inst_p);

		/*
		 * Generate IR_op_full_array_end_k.
		 * Patch IR_op_full_array_k to point at IR_op_flat_array_k.
		 * Patch IR_op_flat_array_k to point at IR_op_full_array_end_k.
		 */
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_full_array_end_k);
		full_tup_p->arg[IR_ARG_TUP].tup = flat_tup_p;
		flat_tup_p->arg[IR_ARG_TUP].tup = tup_p;
	}

	/*
	 * If toplevel array, generate IR_op_pointee_end_k tuple and maintain ctx.
	 */
	if (toplevel_array)
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_pointee_end_k);
		tup_p->arg[IR_ARG_TYPE].type = type_p;
		IR_process_tup(ctx_p, tup_p);
	}
}

/*
 *  I R _ g e n _ m u l t i d _ a o s
 *
 *  Generates tuples for the special case of a multidimensional array that
 *  represents an array of strings.  This is only possible using [v1_string].
 *  To handle this anomalous case, create a stand-in array-of-string-array
 *  representation of the multidimensional array.
 */
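/*
 * Illustrative sketch (assumed IDL, not from this source tree): a
 * [v1_string] field declared as a multidimensional array, e.g.
 *     [v1_string] char names[10][32];
 * is rewritten below as a 1-dimensional array whose base type is a
 * 1-dimensional [v1_string] array, so the ordinary array-of-string-array
 * machinery applies.
 */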
static void IR_gen_multid_aos
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	AST_array_n_t       *array_p;       /* Ptr to AST array node */
	AST_array_index_n_t *index_p;       /* Ptr to AST array index node */
	AST_type_n_t        *new_type_p;    /* Ptr to stand-in array type node */
	AST_array_n_t       *new_array_p;   /* Ptr to stand-in array node */
	AST_array_index_n_t *new_index_p;   /* Ptr to stand-in array index node */
	AST_type_n_t        *base_type_p;   /* Ptr to stand-in base type node */
	AST_array_n_t       *base_array_p;  /* Ptr to stand-in base array node */
	AST_array_index_n_t *base_index_p;  /* Ptr to stand-in base array idx node */
	int i;

	array_p = type_p->type_structure.array;
	index_p = array_p->index_vec;

	/*
	 * From the N-dim array type with [v1_string], create an (N-1)-dim
	 * array type, without [v1_string], whose base type is an array.
	 */
	new_type_p = AST_type_node(null_parser_location, AST_array_k);
	base_type_p = AST_type_node(null_parser_location, AST_array_k);
	new_array_p = AST_array_node(null_parser_location, base_type_p);
	new_index_p = AST_array_index_node(null_parser_location, array_p->index_count-1);

	new_type_p->name = type_p->name;
	new_type_p->type_structure.array = new_array_p;
	new_type_p->flags = type_p->flags;
	new_type_p->fe_info->flags = type_p->fe_info->flags;
	AST_CLR_STRING0(new_type_p);
	if (inst_p != NULL) AST_CLR_STRING0(inst_p);
	new_array_p->index_count = array_p->index_count-1;
	new_array_p->index_vec   = new_index_p;

	for (i = 1; i < array_p->index_count; i++)
	{
		new_index_p->flags       = index_p->flags;
		new_index_p->lower_bound = index_p->lower_bound;
		new_index_p->upper_bound = index_p->upper_bound;
		new_index_p++;
		index_p++;
	}
	/* index_p is left pointing at the index node for the minor (string) dimension */

	/*
	 * Set up the array base type with the [v1_string] attribute.
	 */
	base_array_p = AST_array_node(null_parser_location, array_p->element_type);
	base_index_p = AST_array_index_node(null_parser_location, 1);

	base_type_p->type_structure.array = base_array_p;
	AST_SET_STRING0(base_type_p);
	base_type_p->fe_info->flags = type_p->fe_info->flags;
	base_array_p->index_count = 1;
	base_array_p->index_vec   = base_index_p;
	base_index_p->flags       = index_p->flags;
	base_index_p->lower_bound = index_p->lower_bound;
	base_index_p->upper_bound = index_p->upper_bound;

	/*
	 * Now create tuples for the newly constructed array-of-string type.
	 */
	IR_gen_type_rep(ctx_p, new_type_p, inst_p, 0);
}

/*
 *  I R _ g e n _ p o i n t e r _ r e p
 *
 *  Generates tuples for a pointer/pointee reference.
 */
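/*
 * Illustrative sketch (assumed IDL, not from this source tree): an
 * arrayified pointer parameter such as
 *     [size_is(count)] long *vals;
 * carries IR_ARRAYIFIED_PTR on its pointer tuple and is represented through
 * its array_rep_type, whereas a plain pointer such as
 *     long *val;
 * simply recurses on the pointee type.
 */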
static void IR_gen_pointer_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_type_n_t    *arr_rep_type_p;/* Array rep type for arrayified pointer */
	AST_type_n_t     *ptee_type_p;  /* Pointee type */
	AST_instance_n_t *ptee_inst_p;  /* Instance ptr for pointee rep */

	/*
	 * If the pointee type is handle_t without [transmit_as], there is no
	 * wire rep, so return.
	 */
	ptee_type_p = type_p->type_structure.pointer->pointee_type;
	if (ptee_type_p->kind == AST_handle_k && ptee_type_p->xmit_as_type == NULL)
		return;

	/*
	 * Generate IR_op_*_ptr_k and maintain scope context.
	 */

	/*
	 * Centeris change:
	 *   We need to pass inst_p recursively down the call chain so that we
	 *   can look up the discriminator field for non-encapsulated unions;
	 *   this could occur after several layers of pointer indirection.
	 *   Normally doing this would not make sense because it violates the
	 *   invariant that inst_p->type == type_p; we should pass NULL for
	 *   inst_p instead, but this would break unions.  Because IR_gen_ptr_tup
	 *   prefers using inst_p to check for attributes when possible, passing
	 *   an inst_p that violates the invariant can cause it to generate bogus
	 *   pointer attributes in the intermediate representation.  To work
	 *   around this, we recursively pass ourselves inst_p rather than NULL,
	 *   but pass NULL to IR_gen_ptr_tup when the invariant is not met.
	 *   Hopefully this does not break anything else.
	 *
	 *   -- Brian
	 */
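	/*
	 * Hypothetical IDL sketch of the case described above (not from this
	 * source tree): a non-encapsulated union reached through pointer
	 * indirection, e.g.
	 *     [switch_is(tag)] u_t *up;
	 * needs inst_p carried along so that the discriminator field 'tag',
	 * which hangs off the instance node, can be found when the union type
	 * is finally processed.
	 */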

	if (inst_p && inst_p->type == type_p)
		tup_p = IR_gen_ptr_tup(ctx_p, type_p, inst_p);
	else
		tup_p = IR_gen_ptr_tup(ctx_p, type_p, NULL);
	IR_process_tup(ctx_p, tup_p);

	/*
	 * If pointer is arrayified, generate array rep.
	 * Otherwise, generate rep of pointee type.
	 */
	if (tup_p->flags & IR_ARRAYIFIED_PTR)
	{
		/* Propagate attributes to array rep type if necessary */
		arr_rep_type_p = ptee_type_p->array_rep_type;
		if (FE_TEST(ptee_type_p->fe_info->flags, FE_HAS_PTR))
			FE_SET(arr_rep_type_p->fe_info->flags, FE_HAS_PTR);

		/*
		 * Parameter or field instance skips across pointer to array rep.
		 * NOTE: The need for the 'flags' argument to IR_gen_type_rep stems
		 * from here: The [string] attribute cannot be put on an array_rep_type
		 * since other arrayified types without [string] can have the same
		 * array_rep_type node.  If this reference to the array_rep_type is
		 * stringified, it is captured in and passed thru the tuple flags.
		 */
		IR_gen_type_rep(ctx_p, arr_rep_type_p, inst_p, tup_p->flags);
	}
	else
	{
		/*
		 * Generate rep of pointee type.  Note that if pointer is a toplevel
		 * pointer whose pointee type is not a pointer or array, the instance
		 * node address "skips across" the pointer to the pointee rep.  This
		 * is also true for ANY pointer to a non-encapsulated union, where the
		 * switch information which hangs off the instance node is needed when
		 * processing the union type.
		 */
		if ( (ptee_type_p->kind == AST_disc_union_k && ptee_type_p->
					type_structure.disc_union->discrim_name == NAMETABLE_NIL_ID)
#if 1
		/*
		 * Centeris change:
		 *   Handle the case of two levels of indirection to a union.  This
		 *   should be fixed to handle any number of levels.  A better
		 *   solution would be to always pass inst_p, and add checks for the
		 *   inst_p->type == type_p invariant everywhere else.
		 */
				||
				(ptee_type_p->kind == AST_pointer_k &&
				 ptee_type_p->type_structure.pointer->pointee_type->kind ==
				 AST_disc_union_k)
#endif
				||
				((ptee_type_p->kind != AST_pointer_k || ptee_type_p->
				  type_structure.pointer->pointee_type->kind == AST_void_k
				  || (ptee_type_p->type_structure.pointer->pointee_type->kind
					  == AST_structure_k && AST_CONTEXT_RD_SET(ptee_type_p)))
				 && ptee_type_p->kind != AST_array_k
				 && IR_parent_scope(ctx_p) == IR_SCP_TOPLEVEL) )
			ptee_inst_p = inst_p;
		else
			ptee_inst_p = NULL;
		IR_gen_type_rep(ctx_p, ptee_type_p, ptee_inst_p, 0);
	}

	/*
	 * Generate IR_op_pointee_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_pointee_end_k);
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	IR_process_tup(ctx_p, tup_p);
}

/*
 *  I R _ g e n _ x m i t _ a s _ t y p e _ r e p
 *
 *  Generates tuples for a [transmit_as] type definition.
 */
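/*
 * Illustrative sketch (assumed IDL, not from this source tree): a presented
 * type declared as
 *     typedef [transmit_as(long)] bitset_t;
 * has its transmissible type's tuples bracketed by IR_op_transmit_as_k and
 * IR_op_transmit_end_k, each carrying both the transmissible and the
 * presented type.
 */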
static void IR_gen_xmit_as_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p ATTRIBUTE_UNUSED    /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate IR_op_transmit_as_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_transmit_as_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup    = NULL;
	tup_p->arg[IR_ARG_TYPE].type  = type_p->xmit_as_type;   /* transmissible */
	tup_p->arg[IR_ARG_TYPE2].type = type_p;                 /* presented     */

	IR_process_tup(ctx_p, tup_p);

	/*
	 * Generate part of IR_op_call_k IR_op_call_param_k... tuples.
	 */

	/*
	 * Recurse to generate tuples for the transmissible type.
	 */
	IR_gen_type_rep(ctx_p, type_p->xmit_as_type, NULL, 0);

	/*
	 * Generate rest of IR_op_call_k IR_op_call_param_k... tuples.
	 */

	/*
	 * Generate IR_op_transmit_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_transmit_end_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup    = NULL;
	tup_p->arg[IR_ARG_TYPE].type  = type_p->xmit_as_type;   /* transmissible */
	tup_p->arg[IR_ARG_TYPE2].type = type_p;                 /* presented     */

	IR_process_tup(ctx_p, tup_p);
}

/*
 *  I R _ g e n _ x m i t _ a s _ r e p
 *
 *  Generates tuples to reference a [transmit_as] type and if the
 *  type has not yet been processed, generates tuples for the type.
 */
static void IR_gen_xmit_as_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate block of IR_op_declare_k variables into operation init_tups.
	 */

	/*
	 * Generate indirect reference tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for [transmit_as] type if not yet done.
	 */
	if (type_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(type_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the type node's tuple list until another indirect type
		 * reference is generated or the end of this type reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for [transmit_as] type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		IR_gen_xmit_as_type_rep(ctx_p, type_p, inst_p);
	}
}

/*
 *  I R _ g e n _ r e p r _ a s _ t y p e _ r e p
 *
 *  Generates tuples for a [represent_as] type definition.
 */
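/*
 * Illustrative sketch (assumed ACF syntax, not from this source tree): a
 * network type mapped to a local representation, e.g.
 *     typedef [represent_as(local_time_t)] net_time_t;
 * has its tuples bracketed by IR_op_represent_as_k and IR_op_represent_end_k;
 * the local type information lives on the rep_as node.
 */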
static void IR_gen_repr_as_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p ATTRIBUTE_UNUSED    /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate IR_op_represent_as_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_represent_as_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup    = NULL;
	tup_p->arg[IR_ARG_TYPE].type  = type_p;     /* network type */
	tup_p->arg[IR_ARG_REP_AS].rep_as = type_p->rep_as_type;

	IR_process_tup(ctx_p, tup_p);

	/*
	 * Generate part of IR_op_call_k IR_op_call_param_k... tuples.
	 */

	/*
	 * Recurse to generate tuples for the network type.
	 */
	IR_gen_type_rep(ctx_p, type_p, NULL, 0);

	/*
	 * Generate rest of IR_op_call_k IR_op_call_param_k... tuples.
	 */

	/*
	 * Generate IR_op_represent_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_represent_end_k);
	/*** TBS: IR_ARG_TUP argument points to helper variable tuples ***/
	tup_p->arg[IR_ARG_TUP].tup    = NULL;
	tup_p->arg[IR_ARG_TYPE].type  = type_p;     /* network type */
	tup_p->arg[IR_ARG_REP_AS].rep_as = type_p->rep_as_type;

	IR_process_tup(ctx_p, tup_p);
}

/*
 *  I R _ g e n _ r e p r _ a s _ r e p
 *
 *  Generates tuples for a [represent_as] type.
 */
static void IR_gen_repr_as_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_rep_as_n_t      *rep_p;     /* Ptr to AST represent_as node */

	rep_p = type_p->rep_as_type;

	/*
	 * Generate block of IR_op_declare_k variables into operation init_tups.
	 */

	/*
	 * Generate indirect reference tuple.  The flag indicates that the tuples
	 * will hang off the rep_as node instead of the type node itself.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->flags |= IR_REP_AS;
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for [represent_as] type if not yet done.
	 */
	if (rep_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(rep_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the rep_as node's tuple list until another indirect type
		 * reference is generated or the end of this reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for [represent_as] type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		IR_gen_repr_as_type_rep(ctx_p, type_p, inst_p);
	}
}

/*
 *  I R _ g e n _ c s _ c h a r _ t y p e _ r e p
 *
 *  Generates tuples for a [cs_char] type definition.
 */
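/*
 * Illustrative sketch (assumed ACF syntax, not from this source tree): a
 * network character type tagged for codeset conversion, e.g.
 *     typedef [cs_char(my_tag)] net_char_t;
 * has its tuples bracketed by IR_op_cs_char_k and IR_op_cs_char_end_k, which
 * carry the cs_char node used for international character handling.
 */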
static void IR_gen_cs_char_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p ATTRIBUTE_UNUSED    /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate IR_op_cs_char_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_cs_char_k);
	tup_p->arg[IR_ARG_TUP].tup    = NULL;       /* currently not used */
	tup_p->arg[IR_ARG_TYPE].type  = type_p;     /* network type */
	tup_p->arg[IR_ARG_CS_CHAR].cs_char = type_p->cs_char_type;

	IR_process_tup(ctx_p, tup_p);

	/*
	 * Recurse to generate tuples for the network type.
	 */
	IR_gen_type_rep(ctx_p, type_p, NULL, 0);

	/*
	 * Generate IR_op_cs_char_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_cs_char_end_k);
	tup_p->arg[IR_ARG_TUP].tup    = NULL;       /* currently not used */
	tup_p->arg[IR_ARG_TYPE].type  = type_p;     /* network type */
	tup_p->arg[IR_ARG_CS_CHAR].cs_char = type_p->cs_char_type;

	IR_process_tup(ctx_p, tup_p);
}

/*
 *  I R _ g e n _ c s _ c h a r _ r e p
 *
 *  Generates tuples for a [cs_char] type.
 */
static void IR_gen_cs_char_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_cs_char_n_t     *ichar_p;   /* Ptr to AST cs_char node */

	ichar_p = type_p->cs_char_type;

	/*
	 * Generate indirect reference tuple.  The flag indicates that the tuples
	 * will hang off the cs_char node instead of the type node itself.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->flags |= IR_CS_CHAR;
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for [cs_char] type if not yet done.
	 */
	if (ichar_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(ichar_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the cs_char node's tuple list until another indirect type
		 * reference is generated or the end of this reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for [cs_char] type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		IR_gen_cs_char_type_rep(ctx_p, type_p, inst_p);
	}
}

/*
 *  I R _ g e n _ p i p e _ t y p e _ r e p
 *
 *  Generates tuples for a pipe type definition.
 */
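/*
 * Illustrative sketch (not from this source tree): a pipe type such as
 *     typedef pipe long long_pipe_t;
 * has its base type's tuples bracketed by IR_op_pipe_begin_k and
 * IR_op_pipe_end_k.
 */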
static void IR_gen_pipe_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p ATTRIBUTE_UNUSED    /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */
	AST_pipe_n_t        *pipe_p;    /* Ptr to AST pipe node */

	pipe_p = type_p->type_structure.pipe;

	/*
	 * Generate IR_op_pipe_begin_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_pipe_begin_k);
	tup_p->arg[IR_ARG_TYPE].type  = type_p;             /* pipe type */
	tup_p->arg[IR_ARG_TYPE2].type = pipe_p->base_type;  /* pipe base type */
	IR_process_tup(ctx_p, tup_p);

	/*
	 * Recurse to generate tuples for the pipe base type.
	 */
	IR_gen_type_rep(ctx_p, pipe_p->base_type, NULL, 0);

	/*
	 * Generate IR_op_pipe_end_k tuple and maintain scope context.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_pipe_end_k);
	tup_p->arg[IR_ARG_TYPE].type  = type_p;             /* pipe type */
	tup_p->arg[IR_ARG_TYPE2].type = pipe_p->base_type;  /* pipe base type */
	IR_process_tup(ctx_p, tup_p);
}

/*
 *  I R _ g e n _ p i p e _ r e p
 *
 *  Generates tuples to reference a pipe type and if the type has
 *  not yet been processed, generates tuples for the type.
 */
static void IR_gen_pipe_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [io] AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate indirect reference tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_type_indirect_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;

	/*
	 * Generate tuples for pipe type if not yet done.
	 */
	if (type_p->ir_info == NULL)
	{
		/* Initialize type IREP info. */
		IR_INIT_NODE(type_p);

		/*
		 * Maintain scope context.  This will cause subsequent tuple insertions
		 * to be into the type node's tuple list until another indirect type
		 * reference is generated or the end of this type reference.
		 */
		IR_process_tup(ctx_p, tup_p);

		/*
		 * Generate tuples for pipe type.  When control returns
		 * from this routine, the indirect type scope will have been popped.
		 */
		IR_gen_pipe_type_rep(ctx_p, type_p, inst_p);
	}
}

/*
 *  I R _ g e n _ c o n t e x t _ r e p
 *
 *  Generates tuples for a [context_handle] type.
 */
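/*
 * Illustrative sketch (not from this source tree): context handles are
 * parameters such as
 *     [context_handle] void *ctx;
 * and reduce to a single IR_op_context_handle_k tuple; no pointee rep is
 * generated.
 */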
static void IR_gen_context_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_parameter_n_t   *param_p    /* [in] Ptr to AST parameter node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * Generate IR_op_context_handle_k tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_context_handle_k);
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_PARAM].param = param_p;
}

/*
 *  I R _ g e n _ s c a l a r _ r e p
 *
 *  Generates tuples for a scalar type.
 */
static void IR_gen_scalar_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p     /* [in] Ptr to AST instance node */
)
{
	IR_tup_n_t          *tup_p;     /* Ptr to irep tuple and args */

	/*
	 * We support bounded scalars (the [range] attribute) by prefixing the
	 * scalar opcode with one indicating the scalar's boundaries.
	 */
	if (inst_p != NULL &&
			inst_p->field_attrs != NULL &&
			inst_p->field_attrs->range != NULL)
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_range_k);

		tup_p->arg[IR_ARG_TUP].tup = NULL;
		tup_p->arg[IR_ARG_TYPE].type = type_p;
		tup_p->arg[IR_ARG_INT].int_val = inst_p->field_attrs->range->value[0];
		tup_p->arg[IR_ARG_BOUND_XTRA].int_val = inst_p->field_attrs->range->value[1];
	}
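	/*
	 * Illustrative sketch (assumed syntax, not from this source tree): a
	 * bounded scalar such as
	 *     [range(0, 100)] long percent;
	 * yields an IR_op_range_k tuple carrying the lower and upper bounds,
	 * followed by the ordinary IR_op_marshall_k tuple below.
	 */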

	/*
	 * Generate IR_op_marshall_k tuple.
	 */
	tup_p = IR_gen_irep_tup(ctx_p, IR_op_marshall_k);
	/*
	 * Set a flag if this scalar parameter or field is used as a field
	 * attribute for an array of [cs_char] type.
	 */
	if (inst_p != NULL
			&& FE_TEST(inst_p->fe_info->flags, FE_USED_AS_CS_FLD_ATTR))
		tup_p->flags |= IR_CS_CHAR;
	tup_p->arg[IR_ARG_EXPR].expr = NULL;
	tup_p->arg[IR_ARG_TYPE].type = type_p;
	tup_p->arg[IR_ARG_INST].inst = inst_p;
}

/*
 *  I R _ g e n _ t y p e _ r e p
 *
 *  Generates tuples for a type reference.
 */
static void IR_gen_type_rep
(
 IR_scope_ctx_t      *ctx_p,     /* [io] Scope context */
 AST_type_n_t        *type_p,    /* [in] Ptr to AST type node */
 AST_instance_n_t    *inst_p,    /* [in] Ptr to AST instance node */
 IR_flags_t          flags       /* [in] IREP flags */
)
{
	/*
	 * Must test for [represent_as] before [transmit_as].  Types with both
	 * attributes have the transmit_as tuples nested within the rep_as tuples.
	 * Both are accessed indirectly, but the represent_as tuples hang off the
	 * rep_as node instead of the type node.  If we're already in a rep_as
	 * scope, this is the second time through for the same type, so ignore the
	 * rep_as this time around.  We know this since rep_as cannot be nested.
	 */
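	/*
	 * Hypothetical sketch of the nesting described above (not from this
	 * source tree): a type carrying both attributes produces
	 *     represent_as ( transmit_as ( <transmissible type tuples> ) )
	 * because the rep_as branch below is taken first and recursion then
	 * re-enters this routine for the same type with the rep_as scope set.
	 */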
	if (type_p->rep_as_type != NULL && !IR_in_rep_as(ctx_p)
			&& type_p->kind != AST_handle_k)
	{
		IR_gen_repr_as_rep(ctx_p, type_p, inst_p);
		return;
	}

	if (type_p->xmit_as_type != NULL)
	{
		IR_gen_xmit_as_rep(ctx_p, type_p, inst_p);
		return;
	}

	/*
	 * If we're already in a cs_char scope, this is the second time through for
	 * the same type (this time to process the network type), so ignore the
	 * cs_char this time around.  We know this since cs_char cannot be nested.
	 */
	if (type_p->cs_char_type != NULL && !IR_in_cs_char(ctx_p))
	{
		IR_gen_cs_char_rep(ctx_p, type_p, inst_p);
		return;
	}

	/* Note: no special action for [handle]; the type is transmissible. */

	switch (type_p->kind)
	{
		case AST_boolean_k:
		case AST_byte_k:
		case AST_character_k:
		case AST_small_integer_k:
		case AST_short_integer_k:
		case AST_long_integer_k:
		case AST_hyper_integer_k:
		case AST_small_unsigned_k:
		case AST_short_unsigned_k:
		case AST_long_unsigned_k:
		case AST_hyper_unsigned_k:
		case AST_short_float_k:
		case AST_long_float_k:
		case AST_enum_k:
			IR_gen_scalar_rep(ctx_p, type_p, inst_p);
			break;

		case AST_void_k:
			/* void is valid on a function result; no action */
			break;

		case AST_handle_k:
			/* handle_t is not shipped; no action */
			break;

		case AST_array_k:
			if (type_p->type_structure.array->index_count > 1
					&& inst_p != NULL && AST_STRING0_SET(inst_p))
			{
				IR_gen_multid_aos(ctx_p, type_p, inst_p);
			}
			else
			{
				/*
				 * Caller will have set the IR_STRING flag for the stringified
				 * pointer case; we must set it here for the stringified array
				 * case.
				 */
				if (IR_STRING_ARRAY(type_p, inst_p))
					flags |= IR_STRING;
				IR_gen_array_rep(ctx_p, type_p, inst_p, flags);
			}
			break;

		case AST_structure_k:
			IR_gen_struct_rep(ctx_p, type_p, inst_p);
			break;

		case AST_pipe_k:
			IR_gen_pipe_rep(ctx_p, type_p, inst_p);
			break;

		case AST_pointer_k:
			/*
			 * Test first for a context handle, which is the only valid use
			 * of void *.  Context handles can only be parameters.  Also look
			 * for object references, which are not normal pointers.
			 */
			if (type_p->type_structure.pointer->pointee_type->kind == AST_void_k)
			{
				if (inst_p == NULL || !AST_CONTEXT_SET(inst_p))
				{
					INTERNAL_ERROR("void * in invalid context");
				}
				else
					IR_gen_context_rep(ctx_p, type_p, (AST_parameter_n_t *)inst_p);
			}
			else if (AST_CONTEXT_RD_SET(type_p)
					&& type_p->type_structure.pointer->pointee_type->kind
					== AST_structure_k)
				IR_gen_context_rep(ctx_p, type_p, (AST_parameter_n_t *)inst_p);
			else if (type_p->type_structure.pointer->pointee_type->kind == AST_interface_k)
				IR_gen_interface_rep(ctx_p, type_p->type_structure.pointer->pointee_type, inst_p);
			else
				IR_gen_pointer_rep(ctx_p, type_p, inst_p);
			break;

		case AST_function_k:
			/*** NYI ***/
			break;

		case AST_disc_union_k:
			IR_gen_disc_union_rep(ctx_p, type_p, inst_p);
			break;

		default:
			INTERNAL_ERROR("Unexpected type kind");
	}
}

/*
 *  I R _ g e n _ p a r a m _ r e p
 *
 *  Generates tuples to describe an operation parameter.
 */
void IR_gen_param_rep
(
 AST_parameter_n_t   *param_p    /* [io] AST parameter node */
)
{
	IR_scope_ctx_t      *ctx_p;     /* Scope context */
	IR_tup_n_t          *tup_p;     /* Ptr to generated IREP tuple */

	/* Initialize scope context */
	ctx_p = IR_init_scope(param_p);

	/* Initialize parameter IREP info. */
	IR_INIT_NODE(param_p);

	/*
	 * If this is the first [in] and/or [out] parameter in its operation, and
	 * the operation has non-fixed array(s) of [cs_char] type, generate a
	 * 'codeset shadow' tuple.
	 */
	if (FE_TEST(param_p->fe_info->flags, FE_FIRST_IN_NF_CS_ARR)
			|| FE_TEST(param_p->fe_info->flags, FE_FIRST_OUT_NF_CS_ARR))
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_codeset_shadow_k);
		tup_p->arg[IR_ARG_INT].int_val = 0;
		if (!FE_TEST(param_p->fe_info->flags, FE_FIRST_IN_NF_CS_ARR))
			tup_p->flags |= IR_OUT_ONLY;
		if (!FE_TEST(param_p->fe_info->flags, FE_FIRST_OUT_NF_CS_ARR))
			tup_p->flags |= IR_IN_ONLY;
	}

	/* Generate irep for the parameter and contained types */
	IR_gen_type_rep(ctx_p, param_p->type, (AST_instance_n_t *)param_p, 0);

	/*
	 * If this is the last [in] and/or [out] parameter in its operation, and
	 * the operation has non-fixed array(s) of [cs_char] type, generate a
	 * 'release shadow' tuple.
	 */
	if (FE_TEST(param_p->fe_info->flags, FE_LAST_IN_NF_CS_ARR)
			|| FE_TEST(param_p->fe_info->flags, FE_LAST_OUT_NF_CS_ARR))
	{
		tup_p = IR_gen_irep_tup(ctx_p, IR_op_release_shadow_k);
		if (!FE_TEST(param_p->fe_info->flags, FE_LAST_IN_NF_CS_ARR))
			tup_p->flags |= IR_OUT_ONLY;
		if (!FE_TEST(param_p->fe_info->flags, FE_LAST_OUT_NF_CS_ARR))
			tup_p->flags |= IR_IN_ONLY;
	}

	/* Clean up scope context */
	IR_finish_scope(ctx_p);
}

/*
 *  I R _ g e n _ i r e p
 *
 *  Main IREP routine - generates the Intermediate Representation of an IDL
 *  interface from the Abstract Syntax Tree representation of the interface.
 *  Tuples that describe the irep are hung off AST parameter and type nodes.
 */
boolean IR_gen_irep                 /* Returns TRUE on success */
(
 boolean             *cmd_opt ATTRIBUTE_UNUSED,   /* [in] array of cmd option flags */
 void                **cmd_val ATTRIBUTE_UNUSED,  /* [in] array of cmd option values */
 struct AST_interface_n_t *int_p /* [io] interface abstract syntax tree */
)
{
	AST_export_n_t      *export_p;
	AST_operation_n_t   *oper_p;
	AST_parameter_n_t   *param_p;

	for (export_p = int_p->exports; export_p != NULL; export_p = export_p->next)
	{
		if (export_p->kind == AST_operation_k)
		{
			oper_p = export_p->thing_p.exported_operation;

			/* Process each parameter */
			for (param_p = oper_p->parameters;
					param_p != NULL;
					param_p = param_p->next)
			{
				IR_gen_param_rep(param_p);
			}

			/* Process the operation result */
			IR_gen_param_rep(oper_p->result);
		}
	}

	return TRUE;
}