1/*
2 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24#define __srr0 srr0
25#define __r1 r1
26#define __eip eip
27#define __esp esp
28#define __es es
29#define __ds ds
30#define __ss ss
31#define __cs cs
32
33#ifdef SHLIB
34#include "shlib.h"
35#endif /* SHLIB */
36/*
37 * This file contains the routines that drives the layout phase of the
38 * link-editor.  In this phase the output file's addresses and offset are
39 * set up.
40 */
41#include <stdlib.h>
42#if !(defined(KLD) && defined(__STATIC__))
43#include <stdio.h>
44#include <mach/mach.h>
45#else
46#include <mach/mach.h>
47#endif /* !(defined(KLD) && defined(__STATIC__)) */
48#include <stdarg.h>
49#include <string.h>
50#include <sys/param.h>
51#include "stuff/openstep_mach.h"
52#include <mach-o/fat.h>
53#include <mach-o/loader.h>
54#import <mach/m68k/thread_status.h>
55#undef MACHINE_THREAD_STATE	/* need to undef these to avoid warnings */
56#undef MACHINE_THREAD_STATE_COUNT
57#undef THREAD_STATE_NONE
58#undef VALID_THREAD_STATE_FLAVOR
59#import <mach/ppc/thread_status.h>
60#undef MACHINE_THREAD_STATE	/* need to undef these to avoid warnings */
61#undef MACHINE_THREAD_STATE_COUNT
62#undef THREAD_STATE_NONE
63#undef VALID_THREAD_STATE_FLAVOR
64#import <mach/m88k/thread_status.h>
65#import <mach/i860/thread_status.h>
66#import <mach/i386/thread_status.h>
67#import <mach/hppa/thread_status.h>
68#import <mach/sparc/thread_status.h>
69#include <mach-o/nlist.h>
70#include <mach-o/reloc.h>
71#if defined(RLD) && !defined(SA_RLD) && !(defined(KLD) && defined(__STATIC__))
72#include <mach-o/rld.h>
73#include <streams/streams.h>
74#endif /* defined(RLD) && !defined(SA_RLD) &&
75	  !(defined(KLD) && defined(__STATIC__)) */
76
77#include "stuff/arch.h"
78#include "stuff/macosx_deployment_target.h"
79
80#include "ld.h"
81#include "specs.h"
82#include "fvmlibs.h"
83#include "dylibs.h"
84#include "live_refs.h"
85#include "objects.h"
86#include "sections.h"
87#include "pass1.h"
88#include "symbols.h"
89#include "layout.h"
90#include "pass2.h"
91#include "sets.h"
92#include "mach-o/sarld.h"
93#include "indirect_sections.h"
94#include "uuid.h"
95
96#ifdef RLD
97__private_extern__ long RLD_DEBUG_OUTPUT_FILENAME_flag;
98#endif
99
100/* The output file's mach header */
101__private_extern__ struct mach_header output_mach_header = { 0 };
102
103/*
104 * The output file's symbol table load command and the offsets used in the
105 * second pass to output the symbol table and string table.
106 */
107__private_extern__ struct symtab_info output_symtab_info = { {0} };
108
109/*
110 * The output file's dynamic symbol table load command.
111 */
112__private_extern__ struct dysymtab_info output_dysymtab_info = { {0} };
113
114/*
115 * The output file's two level hints load command.
116 */
117__private_extern__ struct hints_info output_hints_info = { { 0 } };
118
119/*
120 * The output file's prebind_cksum load command.
121 */
122__private_extern__ struct cksum_info output_cksum_info = { { 0 } };
123
124/*
125 * The output file's UUID load command.
126 */
127__private_extern__ struct uuid_info output_uuid_info = { 0 };
128
129/*
130 * The output file's thread load command and the machine specific information
131 * for it.
132 */
133__private_extern__ struct thread_info output_thread_info = { {0} };
134
135/*
136 * The output file's routines load command and the specific information for it.
137 */
138__private_extern__ struct routines_info output_routines_info = { {0} };
139
140/*
141 * The thread states that are currently known by this link editor.
142 * (for the specific cputypes)
143 */
144/* cputype == CPU_TYPE_MC680x0, all cpusubtype's */
145static struct m68k_thread_state_regs mc680x0 = { {0} };
146/* cputype == CPU_TYPE_POWERPC, all cpusubtype's */
147static ppc_thread_state_t powerpc = { 0 };
148/* cputype == CPU_TYPE_MC88000, all cpusubtype's */
149static m88k_thread_state_grf_t mc88000 = { 0 };
150/* cputype == CPU_TYPE_I860, all cpusubtype's */
151static struct i860_thread_state_regs i860 = { {0} };
152/* cputype == CPU_TYPE_I386, all cpusubtype's */
153static i386_thread_state_t intel386 = { 0 };
154/* cputype == CPU_TYPE_HPPA, all cpusubtypes */
155static struct hp_pa_frame_thread_state hppa_frame_state = { 0 };
156static struct hp_pa_integer_thread_state hppa_integer_state = { 0 };
157/* cputype == CPU_TYPE_SPARC, all subtypes */
158static struct sparc_thread_state_regs sparc_state = { {0} };
159/* cputype == CPU_TYPE_ARM, all subtypes */
160static arm_thread_state_t arm_state = { {0} };
161
162static void layout_segments(void);
163static unsigned long next_vmaddr(
164    unsigned long vmaddr,
165    unsigned long vmsize);
166static int qsort_vmaddr(
167    const struct merged_segment **msg1,
168    const struct merged_segment **msg2);
169#ifndef RLD
170static enum bool check_reserved_segment(char *segname,
171					char *reserved_error_string);
172static void check_overlap(struct merged_segment *msg1,
173			  struct merged_segment *msg2,
174			  enum bool prebind_check,
175    			  struct merged_segment *outputs_linkedit_segment);
176static void check_for_overlapping_segments(
177    struct merged_segment *outputs_linkedit_segment);
178static void check_for_lazy_pointer_relocs_too_far(void);
179static void print_load_map(void);
180static void print_load_map_for_objects(struct merged_section *ms);
181#endif /* !defined(RLD) */
182
183/*
184 * layout() is called from main() and lays out the output file.
185 */
186__private_extern__
187void
188layout(void)
189{
190#ifdef RLD
191	memset(&output_mach_header, '\0', sizeof(struct mach_header));
192	memset(&output_symtab_info, '\0', sizeof(struct symtab_info));
193	memset(&output_dysymtab_info, '\0', sizeof(struct dysymtab_info));
194	memset(&output_hints_info, '\0', sizeof(struct hints_info));
195	memset(&output_cksum_info, '\0', sizeof(struct cksum_info));
196#ifndef KLD
197	memset(&output_uuid_info, '\0', sizeof(struct uuid_info));
198#endif
199	memset(&output_thread_info, '\0', sizeof(struct thread_info));
200	memset(&mc680x0, '\0', sizeof(struct m68k_thread_state_regs));
201	memset(&powerpc,     '\0', sizeof(ppc_thread_state_t));
202	memset(&mc88000, '\0', sizeof(m88k_thread_state_grf_t));
203	memset(&intel386,'\0', sizeof(i386_thread_state_t));
204	intel386.es = USER_DATA_SELECTOR;
205	intel386.ds = USER_DATA_SELECTOR;
206	intel386.ss = USER_DATA_SELECTOR;
207	intel386.cs = USER_CODE_SELECTOR;
208	memset(&hppa_frame_state, '\0',
209		sizeof(struct hp_pa_frame_thread_state));
210	memset(&hppa_integer_state, '\0',
211		sizeof(struct hp_pa_integer_thread_state));
212	memset(&sparc_state, '\0', sizeof(struct sparc_thread_state_regs));
213#endif /* RLD */
214	/*
215	 * First finish creating all sections that will be in the final output
216	 * file.  This involves defining common symbols which can create a
217	 * (__DATA,__common) section and creating sections from files (via
218	 * -sectcreate options).
219	 */
220	define_common_symbols();
221	/*
222	 * Process the command line specifications for the sections including
223	 * creating sections from files.
224	 */
225#ifndef RLD
226	process_section_specs();
227#endif /* !defined(RLD) */
228
229	/*
230	 * So literal pointer sections can use indirect symbols these need to
231	 * be resolved before the literal pointer section is merged.
232	 */
233	reduce_indr_symbols();
234	if(errors)
235	    return;
236
237#ifndef RLD
238	/*
239	 * Setup the link editor defined symbols if the output file type could
240	 * be output for dyld. This is needed because the symbol needs to be
241	 * defined and set to a private extern so that file can be laid out
242	 * even though we don't know it's address at this point.
243	 */
244	if(filetype == MH_EXECUTE ||
245	   filetype == MH_BUNDLE ||
246	   filetype == MH_DYLIB ||
247	   filetype == MH_DYLINKER){
248	    setup_link_editor_symbols();
249	    if(undefined_flag == UNDEFINED_DEFINE_A_WAY)
250		define_undefined_symbols_a_way();
251	}
252	if(filetype == MH_PRELOAD)
253	    define_link_editor_preload_symbols(TRUE);
254#endif /* !defined(RLD) */
255
256	/*
257	 * Now that the alignment of all the sections has been determined (from
258	 * the command line and the object files themselves) the literal
259	 * sections can be merged with the correct alignment and their sizes
260	 * in the output file can be determined.
261	 */
262	save_lazy_symbol_pointer_relocs = prebinding;
263	merge_literal_sections(FALSE);
264	if(errors)
265	    return;
266#ifdef DEBUG
267	if(debug & (1 << 21))
268	    print_merged_section_stats();
269#endif /* DEBUG */
270
271	/*
272	 * Segments with only debug sections do not appear in the output.
273	 * So before dead code stripping and laying out the segments and
274	 * sections for the output remove them from the merged segment and
275	 * merged sections list.
276	 */
277	remove_debug_segments();
278
279#ifndef RLD
280	/*
281	 * Layout any sections that have -sectorder options specified for them.
282	 */
283	layout_ordered_sections();
284
285	/*
286	 * If -dead_strip is specified mark the fine_relocs that are live and
287	 * update all the merged counts, and resize everything to only contain
288	 * live items.
289	 */
290	if(dead_strip == TRUE){
291	    /*
292	     * Mark the fine_relocs and symbols that are live.
293	     */
294	    live_marking();
295
296	    /*
297	     * If live_marking() encountered a relocation error just return now.
298	     */
299	    if(errors)
300		return;
301
302	    /*
303	     * Now with all the live fine_relocs and live merged symbols marked
304	     * merged tables, calculated sizes and counts for everything that is
305	     * live.
306	     */
307	    count_live_symbols();
308
309	    /*
310	     * Now resize all sections using the live block sizes and adjust
311	     * the number of relocation entries so the counts includes entries
312	     * only for live blocks.
313	     */
314	    resize_live_sections();
315
316	    /*
317	     * Now re-merge all the literal sections so that only live literals
318	     * end up in the output.
319	     */
320	    merge_literal_sections(TRUE);
321	}
322#endif /* RLD */
323
324	/*
325	 * Report undefined symbols and account for the merged symbols that will
326	 * not be in the output file.
327	 */
328	process_undefineds();
329	if(errors)
330	    return;
331
332#ifndef RLD
333	/*
334	 * Check to make sure symbols are not overridden in dependent dylibs
335	 * when prebinding.
336	 */
337	prebinding_check_for_dylib_override_symbols();
338
339	/*
340	 * If the users wants to see warnings about unused multiply defined
341	 * symbols when -twolevel_namespace is in effect then check for them
342	 * and print out a warning.
343	 */
344	if(nowarnings == FALSE &&
345	   twolevel_namespace == TRUE &&
346	   multiply_defined_unused_flag != MULTIPLY_DEFINED_SUPPRESS)
347	    twolevel_namespace_check_for_unused_dylib_symbols();
348#endif /* RLD */
349
350	/*
351	 * Assign the symbol table indexes for the symbol table entries.
352	 */
353	assign_output_symbol_indexes();
354
355#ifndef RLD
356	/*
357	 * Layout the dynamic shared library tables if the output is a MH_DYLIB
358	 * file.
359	 */
360	if(filetype == MH_DYLIB)
361	    layout_dylib_tables();
362
363	/*
364	 * If the output is intended for the dynamic link editor or -dead_strip
365	 * is specified relayout the relocation entries for only the ones that
366	 * will be in the output file.
367	 */
368	if(output_for_dyld == TRUE || dead_strip == TRUE)
369	    relayout_relocs();
370
371	/*
372	 * If the segment alignment is not set, set it based on the target
373	 * architecture.
374	 */
375	if(segalign_specified == FALSE)
376#endif /* !defined(RLD) */
377	    segalign = get_segalign_from_flag(&arch_flag);
378
379	/*
380	 * Set the segment addresses, protections and offsets into the file.
381	 */
382	layout_segments();
383
384#ifndef RLD
385	/*
386	 * For symbol from dylibs reset the prebound symbols if not prebinding.
387	 */
388	reset_prebound_undefines();
389
390	if(load_map)
391	    print_load_map();
392#endif /* !defined(RLD) */
393
394#ifdef DEBUG
395	if(debug & (1 << 7)){
396	    print_mach_header();
397	    print_merged_sections("after layout");
398	    print_symtab_info();
399	    print_thread_info();
400	}
401	if(debug & (1 << 8))
402	    print_symbol_list("after layout", FALSE);
403	if(debug & (1 << 20))
404	    print_object_list();
405#endif /* DEBUG */
406}
407
408#if defined(RLD) && !defined(SA_RLD) && !(defined(KLD) && defined(__STATIC__))
409/*
410 * layout_rld_symfile() is called from rld_write_symfile() and lays out the
411 * output file.  This contains only a mach_header, a symtab load command the
412 * symbol and string table for the current set of merged symbols.
413 */
414__private_extern__
415void
416layout_rld_symfile(void)
417{
418    unsigned long offset;
419    kern_return_t r;
420
421	memset(&output_mach_header, '\0', sizeof(struct mach_header));
422	memset(&output_symtab_info, '\0', sizeof(struct symtab_info));
423
424	/*
425	 * Create the symbol table load command.
426	 */
427	output_symtab_info.symtab_command.cmd = LC_SYMTAB;
428	output_symtab_info.symtab_command.cmdsize =
429						sizeof(struct symtab_command);
430	output_symtab_info.symtab_command.nsyms = nmerged_symbols;
431	output_symtab_info.symtab_command.strsize =
432	    rnd(merged_string_size + STRING_SIZE_OFFSET,
433		  sizeof(unsigned long));
434	output_symtab_info.output_strpad =
435	    output_symtab_info.symtab_command.strsize -
436	    (merged_string_size + STRING_SIZE_OFFSET);
437	output_symtab_info.output_merged_strsize = STRING_SIZE_OFFSET;
438	output_symtab_info.output_local_strsize = STRING_SIZE_OFFSET +
439						      merged_string_size;
440	/*
441	 * Fill in the mach_header for the output file.
442	 */
443	output_mach_header.magic = MH_MAGIC;
444	output_mach_header.cputype = arch_flag.cputype;
445	output_mach_header.cpusubtype = arch_flag.cpusubtype;
446	output_mach_header.filetype = filetype;
447	output_mach_header.ncmds = 1;
448	output_mach_header.sizeofcmds =
449	 			output_symtab_info.symtab_command.cmdsize;
450	output_mach_header.flags = 0;
451
452	/*
453	 * Lay everything out setting the offsets.
454	 */
455	offset = sizeof(struct mach_header) + output_mach_header.sizeofcmds;
456	output_symtab_info.symtab_command.symoff = offset;
457	offset += output_symtab_info.symtab_command.nsyms *
458		  sizeof(struct nlist);
459	output_symtab_info.symtab_command.stroff = offset;
460	offset += output_symtab_info.symtab_command.strsize;
461
462	/*
463	 * Allocate the buffer for the output file.
464	 */
465	output_size = offset;
466	if((r = vm_allocate(mach_task_self(), (vm_address_t *)&output_addr,
467			    output_size, TRUE)) != KERN_SUCCESS)
468	    mach_fatal(r, "can't vm_allocate() memory for output of size "
469		       "%lu", output_size);
470#ifdef RLD_VM_ALLOC_DEBUG
471	print("rld() vm_allocate: addr = 0x%0x size = 0x%x\n",
472	      (unsigned int)output_addr, (unsigned int)output_size);
473#endif /* RLD_VM_ALLOC_DEBUG */
474}
475#endif /* defined(RLD) && !defined(SA_RLD) &&
476	  !(defined(KLD) && defined(__STATIC__)) */
477
478/*
479 * layout_segments() basicly lays out the addresses and file offsets of
480 * everything in the ouput file (since everything can be in a segment).
481 * It checks for the link editor reserved segment "__PAGEZERO" and "__LINKEDIT"
482 * and prints an error message if they exist in the output.  It creates these
483 * segments if this is the right file type and the right options are specified.
484 * It processes all the segment specifications from the command line options.
485 * Sets the addresses of all segments and sections in those segments and sets
486 * the sizes of all segments.  Also sets the file offsets of all segments,
487 * sections, relocation information and symbol table information.  It creates
488 * the mach header that will go in the output file.  It numbers the merged
489 * sections with their section number they will have in the output file.
490 */
491static
492void
493layout_segments(void)
494{
495    unsigned long i, ncmds, sizeofcmds, headers_size, offset;
496    unsigned long addr, size, max_first_align, pad, max_align;
497    struct merged_segment **p, *msg, *first_msg;
498    struct merged_section **content, **zerofill, *ms;
499#ifndef RLD
500    struct merged_fvmlib **q, *mfl;
501    unsigned long nfvmlibs;
502    struct segment_spec *seg_spec;
503    enum bool address_zero_specified;
504    struct merged_dylib *mdl;
505    struct dynamic_library *dp;
506#endif /* !defined(RLD) */
507#ifdef RLD
508#ifndef SA_RLD
509    kern_return_t r;
510#endif /* !defined SA_RLD */
511    unsigned long allocate_size;
512#endif /* defined(RLD) */
513    struct merged_symbol *merged_symbol;
514
515    static struct merged_segment linkedit_segment = { {0} };
516    static struct merged_segment pagezero_segment = { {0} };
517    static struct merged_segment stack_segment = { {0} };
518    static struct merged_segment object_segment = { {0} };
519
520#ifdef RLD
521	memset(&object_segment, '\0', sizeof(struct merged_segment));
522	original_merged_segments = merged_segments;
523#endif /* RLD */
524
525	/*
526	 * If the file type is MH_OBJECT then place all the sections in one
527	 * unnamed segment.
528	 */
529	if(filetype == MH_OBJECT){
530	    object_segment.filename = outputfile;
531	    content = &(object_segment.content_sections);
532	    zerofill = &(object_segment.zerofill_sections);
533	    p = &merged_segments;
534	    while(*p){
535		msg = *p;
536		object_segment.sg.nsects += msg->sg.nsects;
537		*content = msg->content_sections;
538		while(*content){
539		    ms = *content;
540		    content = &(ms->next);
541		}
542		*zerofill = msg->zerofill_sections;
543		while(*zerofill){
544		    ms = *zerofill;
545		    zerofill = &(ms->next);
546		}
547		p = &(msg->next);
548	    }
549	    if(object_segment.sg.nsects != 0)
550		merged_segments = &object_segment;
551	}
552
553#ifndef RLD
554	/*
555	 * Set thread_in_output in the output_thread_info if we are going to
556	 * create a thread command.  It is created if this is filetype is not a
557	 * shared library or bundle and we have seen an object file so we know
558	 * what type of machine the thread is for.  Or if we haven't seen an
559	 * object file but entry point symbol name was specified.
560	 */
561 	if(filetype != MH_FVMLIB &&
562	   filetype != MH_DYLIB &&
563	   filetype != MH_BUNDLE &&
564	   arch_flag.cputype != 0 &&
565 	   ((merged_segments != NULL &&
566	     merged_segments->content_sections != NULL) ||
567	     entry_point_name != NULL)){
568
569	    output_thread_info.thread_in_output = TRUE;
570
571	    if(filetype == MH_DYLINKER)
572		output_thread_info.thread_command.cmd = LC_THREAD;
573	    else
574		output_thread_info.thread_command.cmd = LC_UNIXTHREAD;
575
576	    /*
577	     * If the stack address or size is set then create the stack segment
578	     * for it.
579	     */
580	    if((stack_addr_specified == TRUE || stack_size_specified == TRUE) &&
581	       output_thread_info.thread_command.cmd == LC_UNIXTHREAD){
582		if(check_reserved_segment(SEG_UNIXSTACK, "segment "
583			SEG_UNIXSTACK " reserved for the -stack_addr and "
584			"-stack_size options")){
585		    /*
586		     * There shouldn't be any segment specifications for this
587		     * segment except protection. Protection must be at least
588		     * rw- and defaults to architecure's segment protection
589		     * default for both initial and maximum protection.
590		     */
591		    seg_spec = lookup_segment_spec(SEG_UNIXSTACK);
592		    if(seg_spec != NULL){
593			if(seg_spec->addr_specified)
594			    error("specified address for segment " SEG_UNIXSTACK
595				  " not allowed (segment " SEG_UNIXSTACK
596				  " reserved for unix stack, use -stack_addr)");
597			if(seg_spec->prot_specified){
598			    if((seg_spec->maxprot &
599			        (VM_PROT_READ | VM_PROT_WRITE)) !=
600				(VM_PROT_READ | VM_PROT_WRITE)){
601				error("specified maximum protection for "
602				  "segment " SEG_UNIXSTACK " must include read "
603				  " and write");
604				seg_spec->maxprot |= (VM_PROT_READ |
605						      VM_PROT_WRITE);
606			    }
607			    if((seg_spec->initprot &
608			        (VM_PROT_READ | VM_PROT_WRITE)) !=
609				(VM_PROT_READ | VM_PROT_WRITE)){
610				error("specified initial protection for "
611				  "segment " SEG_UNIXSTACK " must include read "
612				  " and write");
613				seg_spec->initprot |= (VM_PROT_READ |
614						       VM_PROT_WRITE);
615			    }
616			    stack_segment.sg.maxprot = seg_spec->maxprot;
617			    stack_segment.sg.initprot = seg_spec->initprot;
618			    /*
619			     * Only if the protection of the static is specified
620			     * to include execute permision do we also cause the
621			     * MH_ALLOW_STACK_EXECUTION bit to get set.
622			     */
623			    if((stack_segment.sg.maxprot & VM_PROT_EXECUTE) ==
624			       VM_PROT_EXECUTE)
625				allow_stack_execute = TRUE;
626			}
627			seg_spec->processed = TRUE;
628			stack_segment.prot_set = TRUE;
629		    }
630		    else{
631			stack_segment.sg.maxprot =
632			    get_segprot_from_flag(&arch_flag);
633			stack_segment.sg.initprot = stack_segment.sg.maxprot;
634			stack_segment.prot_set = TRUE;
635		    }
636		    if(stack_addr_specified == TRUE){
637			if(stack_addr % segalign != 0)
638			    fatal("-stack_addr: 0x%x not a multiple of the "
639				  "segment alignment (0x%x)",
640				  (unsigned int)stack_addr,
641				  (unsigned int)segalign);
642		    }
643		    else{
644			stack_addr = get_stack_addr_from_flag(&arch_flag);
645			warning("no -stack_addr specified using the default "
646				"addr: 0x%x", (unsigned int)stack_addr);
647			stack_addr_specified = TRUE;
648		    }
649		    if(stack_size_specified == TRUE){
650			if(stack_size % segalign != 0)
651			    fatal("-stack_size: 0x%x not a multiple of the "
652				  "segment alignment (0x%x)",
653				  (unsigned int)stack_size,
654				  (unsigned int)segalign);
655		    }
656		    else{
657			stack_size = get_stack_size_from_flag(&arch_flag);
658			warning("no -stack_size specified using the default "
659				"size: 0x%x", (unsigned int)stack_size);
660		    }
661		    stack_segment.filename = outputfile;
662		    strcpy(stack_segment.sg.segname, SEG_UNIXSTACK);
663		    if(get_stack_direction_from_flag(&arch_flag) < 0)
664			stack_segment.sg.vmaddr = stack_addr - stack_size;
665		    else
666			stack_segment.sg.vmaddr = stack_addr;
667		    stack_segment.sg.vmsize = stack_size;
668		    stack_segment.addr_set = TRUE;
669		    /* place this last in the merged segment list */
670		    p = &merged_segments;
671		    while(*p){
672			msg = *p;
673			p = &(msg->next);
674		    }
675		    *p = &stack_segment;
676		}
677	    }
678	}
679	else{
680	    output_thread_info.thread_in_output = FALSE;
681	}
682
683	/*
684	 * Set routines_in_output in the output_routines_info if we are going to
685	 * create a routines command.  It is created if this filetype is a
686	 * shared library and an init name was specified.
687	 */
688 	if(filetype == MH_DYLIB && init_name != NULL){
689	    output_routines_info.routines_in_output = TRUE;
690	    output_routines_info.routines_command.cmd = LC_ROUTINES;
691	    output_routines_info.routines_command.cmdsize =
692		sizeof(struct routines_command);
693	}
694	else{
695	    output_routines_info.routines_in_output = FALSE;
696	}
697
698	/*
699	 * Create the link edit segment if specified and size it.
700	 */
701	if(filetype == MH_EXECUTE ||
702	   filetype == MH_BUNDLE ||
703	   filetype == MH_FVMLIB ||
704	   filetype == MH_DYLIB ||
705	   filetype == MH_DYLINKER){
706	    if(check_reserved_segment(SEG_LINKEDIT, "segment " SEG_LINKEDIT
707				   " reserved for the -seglinkedit option")){
708		/*
709		 * Now that the above check has been made and the segment is
710		 * known not to exist create the link edit segment if specified.
711		 */
712		if(seglinkedit == TRUE){
713		    /*
714		     * Fill in the merged segment.  In this case only the
715		     * segment name and filesize of the segment are not zero
716		     * or NULL.  Note the link edit segment is unique in that
717		     * it's filesize is not rounded to the segment alignment.
718		     * This can only be done because this is the last segment
719		     * in the file (right before end of file).
720		     */
721		    linkedit_segment.filename = outputfile;
722		    strcpy(linkedit_segment.sg.segname, SEG_LINKEDIT);
723		    if(save_reloc)
724			linkedit_segment.sg.filesize += nreloc *
725						sizeof(struct relocation_info);
726		    if(output_for_dyld)
727			linkedit_segment.sg.filesize +=
728			    (output_dysymtab_info.dysymtab_command.nlocrel +
729			     output_dysymtab_info.dysymtab_command.nextrel) *
730			    sizeof(struct relocation_info);
731		    if(filetype == MH_DYLIB)
732			linkedit_segment.sg.filesize +=
733			    output_dysymtab_info.dysymtab_command.ntoc *
734				sizeof(struct dylib_table_of_contents) +
735			    output_dysymtab_info.dysymtab_command.nmodtab *
736				sizeof(struct dylib_module) +
737			    output_dysymtab_info.dysymtab_command.nextrefsyms *
738				sizeof(struct dylib_reference);
739		    if(nindirectsyms != 0)
740			linkedit_segment.sg.filesize +=
741			    nindirectsyms * sizeof(unsigned long);
742		    if(strip_level != STRIP_ALL)
743			linkedit_segment.sg.filesize +=
744			    (nmerged_symbols
745			     - nstripped_merged_symbols
746			     + nlocal_symbols
747			     - nmerged_symbols_referenced_only_from_dylibs) *
748			    sizeof(struct nlist) +
749			    rnd(merged_string_size +
750				  local_string_size +
751				  STRING_SIZE_OFFSET,
752				  sizeof(unsigned long));
753		    else
754			warning("segment created for -seglinkedit zero size "
755			        "(output file stripped)");
756		    if(output_for_dyld &&
757		       twolevel_namespace == TRUE &&
758		       twolevel_namespace_hints == TRUE)
759			linkedit_segment.sg.filesize +=
760			    output_hints_info.twolevel_hints_command.nhints *
761			    sizeof(struct twolevel_hint);
762		    linkedit_segment.sg.vmsize =
763				rnd(linkedit_segment.sg.filesize, segalign);
764		    /* place this last in the merged segment list */
765		    p = &merged_segments;
766		    while(*p){
767			msg = *p;
768			p = &(msg->next);
769		    }
770		    *p = &linkedit_segment;
771		}
772	    }
773	}
774
775	/*
776	 * If the the file type is MH_EXECUTE and address zero has not been
777	 * assigned to a segment create the "__PAGEZERO" segment.
778	 */
779	if(filetype == MH_EXECUTE){
780	    if(check_reserved_segment(SEG_PAGEZERO, "segment " SEG_PAGEZERO
781				      " reserved for address zero through "
782				      "segment alignment")){
783		/*
784		 * There shouldn't be any segment specifications for this
785		 * segment (address or protection).
786		 */
787		seg_spec = lookup_segment_spec(SEG_PAGEZERO);
788		if(seg_spec != NULL){
789		    if(seg_spec->addr_specified)
790			error("specified address for segment " SEG_PAGEZERO
791			      " not allowed (segment " SEG_PAGEZERO " reserved "
792			      "for address zero through segment alignment)");
793		    if(seg_spec->prot_specified)
794			error("specified protection for segment " SEG_PAGEZERO
795			      " not allowed (segment " SEG_PAGEZERO " reserved "
796			      "for address zero through segment alignment and "
797			      "has no assess protections)");
798		    seg_spec->processed = TRUE;
799		}
800		address_zero_specified = FALSE;
801		for(i = 0; i < nsegment_specs; i++){
802		    if(segment_specs[i].addr_specified &&
803		       segment_specs[i].addr == 0 &&
804		       &(segment_specs[i]) != seg_spec){
805			address_zero_specified = TRUE;
806			break;
807		    }
808		}
809		if(address_zero_specified == FALSE &&
810		   (seg1addr_specified == FALSE || seg1addr != 0)){
811		    pagezero_segment.filename = outputfile;
812		    pagezero_segment.addr_set = TRUE;
813		    pagezero_segment.prot_set = TRUE;
814		    strcpy(pagezero_segment.sg.segname, SEG_PAGEZERO);
815		    if(pagezero_size != 0)
816			pagezero_segment.sg.vmsize = rnd(pagezero_size,
817							   segalign);
818		    else
819			pagezero_segment.sg.vmsize = segalign;
820		    /* place this first in the merged segment list */
821		    pagezero_segment.next = merged_segments;
822		    merged_segments = &pagezero_segment;
823		}
824	    }
825	}
826
827	/*
828	 * Process the command line specifications for the segments setting the
829	 * addresses and protections for the specified segments into the merged
830	 * segments.
831	 */
832	process_segment_specs();
833#endif /* !defined(RLD) */
834
835#ifndef RLD
836	/*
837	 * If there is a "__TEXT" segment who's protection has not been set
838	 * set it's inital protection to "r-x" and it's maximum protection
839	 * to "rwx".
840	 */
841	msg = lookup_merged_segment(SEG_TEXT);
842	if(msg != NULL && msg->prot_set == FALSE){
843	    msg->sg.initprot = VM_PROT_READ | VM_PROT_EXECUTE;
844	    msg->sg.maxprot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
845	    msg->prot_set = TRUE;
846	}
847	/*
848	 * If there is a "__IMPORT" segment who's protection has not been set
849	 * set it's inital protection to "rwx" and it's maximum protection
850	 * to "rwx".
851	 */
852	msg = lookup_merged_segment(SEG_IMPORT);
853	if(msg != NULL && msg->prot_set == FALSE){
854	    msg->sg.initprot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
855	    msg->sg.maxprot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
856	    msg->prot_set = TRUE;
857	}
858	/*
859	 * If the "__LINKEDIT" segment is created sets its inital protection to
860	 * "r--" and it's maximum protection to the architecture's default.
861	 */
862	if(seglinkedit){
863	    msg = lookup_merged_segment(SEG_LINKEDIT);
864	    if(msg != NULL && msg->prot_set == FALSE){
865		msg->sg.initprot = VM_PROT_READ;
866		msg->sg.maxprot = get_segprot_from_flag(&arch_flag);
867		msg->prot_set = TRUE;
868	    }
869	}
870#endif /* !defined(RLD) */
871
872	/*
873	 * Set the protections of segments that have not had their protection
874	 * set to the architecture's default protection.
875	 */
876	p = &merged_segments;
877	while(*p){
878	    msg = *p;
879	    if(msg->prot_set == FALSE){
880		/*
881		 * Only turn on execute protection if any of the sections in
882		 * the segment contain some instructions.  For pre-4.0 objects
883		 * this is always in the (__TEXT,__text) section and is handled
884		 * above anyway.
885		 */
886		msg->sg.initprot = VM_PROT_READ | VM_PROT_WRITE;
887		content = &(msg->content_sections);
888		while(*content){
889		    ms = *content;
890		    if((ms->s.flags & S_ATTR_SOME_INSTRUCTIONS) ==
891			S_ATTR_SOME_INSTRUCTIONS){
892			msg->sg.initprot |= VM_PROT_EXECUTE;
893			break;
894		    }
895		    content = &(ms->next);
896		}
897		msg->sg.maxprot = get_segprot_from_flag(&arch_flag);
898		if((msg->sg.initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE)
899		    msg->sg.maxprot |= VM_PROT_EXECUTE;
900		msg->prot_set = TRUE;
901	    }
902	    p = &(msg->next);
903	}
904
905	/*
906	 * Set the address of the first segment via the -seg1addr option or the
907	 * -segs_read_only_addr/-segs_read_write_addr options or the default
908	 * first address of the output format.
909	 */
910	if(segs_read_only_addr_specified){
911	    if(segs_read_only_addr % segalign != 0)
912		fatal("-segs_read_only_addr: 0x%x not a multiple of the segment"
913		      " alignment (0x%x)", (unsigned int)segs_read_only_addr,
914		      (unsigned int)segalign);
915	    if(segs_read_write_addr_specified){
916		if(segs_read_write_addr % segalign != 0)
917		    fatal("-segs_read_write_addr: 0x%x not a multiple of the "
918		          "segment alignment (0x%x)",
919			  (unsigned int)segs_read_write_addr,
920			  (unsigned int)segalign);
921	    }
922	    else{
923		segs_read_write_addr = segs_read_only_addr +
924				   get_shared_region_size_from_flag(&arch_flag);
925	    }
926	}
927	first_msg = merged_segments;
928	if(first_msg == &pagezero_segment)
929	    first_msg = first_msg->next;
930	if(first_msg != NULL){
931	    if(seg1addr_specified == TRUE){
932		if(seg1addr % segalign != 0)
933		    fatal("-seg1addr: 0x%x not a multiple of the segment "
934			  "alignment (0x%x)", (unsigned int)seg1addr,
935			  (unsigned int)segalign);
936		if(first_msg->addr_set == TRUE)
937		    fatal("address of first segment: %.16s set both by name "
938			  "and with the -seg1addr option",
939			  first_msg->sg.segname);
940		first_msg->sg.vmaddr = seg1addr;
941		first_msg->addr_set = TRUE;
942	    }
943	    else{
944		if(first_msg->addr_set == FALSE){
945		    if(filetype == MH_EXECUTE &&
946		       pagezero_segment.addr_set == TRUE)
947			first_msg->sg.vmaddr = pagezero_segment.sg.vmsize;
948		    else{
949			if(segs_read_only_addr_specified){
950			    if((first_msg->sg.initprot & VM_PROT_WRITE) == 0)
951				first_msg->sg.vmaddr = segs_read_only_addr;
952			    else
953				first_msg->sg.vmaddr = segs_read_write_addr;
954			}
955			else{
956			    first_msg->sg.vmaddr = 0;
957			}
958		    }
959		    first_msg->addr_set = TRUE;
960		}
961	    }
962	}
963
964	/*
965	 * Size and count the load commands in the output file which include:
966	 *   The segment load commands
967	 *   The load fvmlib commands
968	 *   A symtab command
969	 *   A dysymtab command
970	 *   A thread command (if the file type is NOT MH_DYLIB or MH_FVMLIB)
971	 */
972	ncmds = 0;
973	sizeofcmds = 0;
974	/*
975	 * Size the segment commands and accumulate the number of commands and
976	 * size of them.
977	 */
978	p = &merged_segments;
979	while(*p){
980	    msg = *p;
981	    msg->sg.cmd = LC_SEGMENT;
982	    msg->sg.cmdsize = sizeof(struct segment_command) +
983			      msg->sg.nsects * sizeof(struct section);
984	    ncmds++;
985	    sizeofcmds += msg->sg.cmdsize;
986	    p = &(msg->next);
987	}
988#ifndef RLD
989	/*
990	 * Accumulate the number of commands for the fixed VM shared libraries
991	 * and their size.  Since the commands themselves come from the input
992	 * object files the 'cmd' and 'cmdsize' fields are already set.
993	 */
994	nfvmlibs = 0;
995	q = &merged_fvmlibs;
996	while(*q){
997	    mfl = *q;
998	    nfvmlibs++;
999	    sizeofcmds += mfl->fl->cmdsize;
1000	    q = &(mfl->next);
1001	}
1002	if(filetype == MH_FVMLIB && nfvmlibs != 1){
1003	    if(nfvmlibs == 0)
1004		error("no LC_IDFVMLIB command in the linked object files");
1005	    else
1006		error("more than one LC_IDFVMLIB command in the linked object "
1007		      "files");
1008	}
1009	ncmds += nfvmlibs;
1010
1011	/*
1012	 * If the output file is a dynamicly linked shared library (MH_DYLIB)
1013	 * then create the library identifing information load command.  And
1014	 * if -sub_framework was specified then create the sub_framework
1015	 * command.
1016	 */
1017	if(filetype == MH_DYLIB){
1018	    create_dylib_id_command();
1019	    if(sub_framework == TRUE){
1020		create_sub_framework_command();
1021		sizeofcmds += merged_sub_framework->sub->cmdsize;
1022		ncmds++;
1023	    }
1024	    if(nsub_umbrellas != 0){
1025		sizeofcmds += create_sub_umbrella_commands();
1026		ncmds += nsub_umbrellas;
1027	    }
1028	    if(nsub_librarys != 0){
1029		sizeofcmds += create_sub_library_commands();
1030		ncmds += nsub_librarys;
1031	    }
1032	    if(nallowable_clients != 0){
1033		sizeofcmds += create_sub_client_commands();
1034		ncmds += nallowable_clients;
1035	    }
1036	}
1037	/*
1038	 * If the output file is a dynamic linker (MH_DYLINKER) then create the
1039	 * the dynamic linker identifing information load command.
1040	 */
1041	if(filetype == MH_DYLINKER)
1042	    create_dylinker_id_command();
1043
1044	/*
1045	 * If there is a dynamic linkers then account for the load commands and
1046	 * it's size.  The command already has the 'cmd' and 'cmdsize' fields
1047	 * set.
1048	 */
1049	if(merged_dylinker != NULL){
1050	    sizeofcmds += merged_dylinker->dyld->cmdsize;
1051	    ncmds++;
1052	}
1053
1054	/*
1055	 * Accumulate the number of commands for the dynamicly linked shared
1056	 * libraries and their size.  The commands already have their 'cmd' and
1057	 * 'cmdsize' fields set.
1058	 */
1059	mdl = merged_dylibs;
1060	while(mdl != NULL){
1061	    sizeofcmds += mdl->dl->cmdsize;
1062	    ncmds++;
1063	    /*
1064	     * If -headerpad_max_install_names is specified make sure headerpad
1065	     * is big enough to change all the install name of the dylibs in
1066	     * the output to MAXPATHLEN.
1067	     */
1068	    if(headerpad_max_install_names == TRUE){
1069		if(mdl->dl->cmdsize - sizeof(struct dylib_command) < MAXPATHLEN)
1070		    headerpad += MAXPATHLEN -
1071				 (mdl->dl->cmdsize -
1072				  sizeof(struct dylib_command));
1073	    }
1074	    mdl = mdl->next;
1075	}
1076
1077	/*
1078	 * Accumulate the number of commands for the prebound dynamic libraries
1079	 * and their size.
1080	 */
1081	if(filetype == MH_EXECUTE){
1082	    for(dp = dynamic_libs; dp != NULL; dp = dp->next){
1083		if(dp->type == DYLIB){
1084		    if(dp->pbdylib != NULL){
1085			sizeofcmds += dp->pbdylib->cmdsize;
1086			ncmds++;
1087			/*
1088			 * If -headerpad_max_install_names is specified make
1089			 * sure headerpad is big enough to change all the
1090			 * install name of the dylibs in the output to
1091			 * MAXPATHLEN.
1092			 */
1093			if(headerpad_max_install_names == TRUE){
1094			    if(dp->pbdylib->cmdsize -
1095			       sizeof(struct prebound_dylib_command) <
1096				MAXPATHLEN)
1097				headerpad += MAXPATHLEN -
1098				     (dp->pbdylib->cmdsize -
1099				      sizeof(struct prebound_dylib_command));
1100			}
1101			/*
1102			 * Since we are building this executable prebound we
1103			 * want to have some header padding in case their are
1104			 * more indirectly referenced dylibs that will need to
1105			 * be added when redoing the prebinding.  We have found
1106			 * that in the 10.2 release that 3 times the size of
1107			 * the initial LC_PREBOUND_DYLIB commands seems to work
1108			 * for most but not all things.
1109			 */
1110			headerpad += dp->pbdylib->cmdsize * 3 * indirect_library_ratio;
1111		    }
1112		}
1113	    }
1114	}
1115
1116#endif /* !defined(RLD) */
1117	/*
1118	 * Create the symbol table load command.
1119	 */
1120	output_symtab_info.symtab_command.cmd = LC_SYMTAB;
1121	output_symtab_info.symtab_command.cmdsize =
1122						sizeof(struct symtab_command);
1123	if(strip_level != STRIP_ALL){
1124	    output_symtab_info.symtab_command.nsyms =
1125		nmerged_symbols
1126		- nstripped_merged_symbols
1127		+ nlocal_symbols
1128		- nmerged_symbols_referenced_only_from_dylibs;
1129	    output_symtab_info.symtab_command.strsize =
1130		rnd(merged_string_size +
1131		      local_string_size +
1132		      STRING_SIZE_OFFSET,
1133		      sizeof(unsigned long));
1134	    output_symtab_info.output_strpad =
1135		output_symtab_info.symtab_command.strsize -
1136		(merged_string_size + local_string_size + STRING_SIZE_OFFSET);
1137	    output_symtab_info.output_merged_strsize = STRING_SIZE_OFFSET;
1138	    output_symtab_info.output_local_strsize = STRING_SIZE_OFFSET +
1139						      merged_string_size;
1140	}
1141	ncmds++;
1142	sizeofcmds += output_symtab_info.symtab_command.cmdsize;
1143	/*
1144	 * Create the dynamic symbol table load command.
1145	 */
1146	if(nindirectsyms != 0 || output_for_dyld){
1147	    output_dysymtab_info.dysymtab_command.cmd = LC_DYSYMTAB;
1148	    output_dysymtab_info.dysymtab_command.cmdsize =
1149						sizeof(struct dysymtab_command);
1150	    output_dysymtab_info.dysymtab_command.nindirectsyms = nindirectsyms;
1151	    ncmds++;
1152	    sizeofcmds += output_dysymtab_info.dysymtab_command.cmdsize;
1153	}
1154	/*
1155	 * Create the two-level namespace hints load command.
1156	 */
1157	if(output_for_dyld && twolevel_namespace == TRUE &&
1158	   twolevel_namespace_hints == TRUE){
1159	    output_hints_info.twolevel_hints_command.cmd = LC_TWOLEVEL_HINTS;
1160	    output_hints_info.twolevel_hints_command.cmdsize =
1161					sizeof(struct twolevel_hints_command);
1162	    ncmds++;
1163	    sizeofcmds += output_hints_info.twolevel_hints_command.cmdsize;
1164	}
1165	/*
1166	 * Create the prebind cksum load command.
1167	 */
1168	if(prebinding == TRUE && macosx_deployment_target.major >= 2){
1169	    output_cksum_info.prebind_cksum_command.cmd = LC_PREBIND_CKSUM;
1170	    output_cksum_info.prebind_cksum_command.cmdsize =
1171					sizeof(struct prebind_cksum_command);
1172	    output_cksum_info.prebind_cksum_command.cksum = 0;
1173	    ncmds++;
1174	    sizeofcmds += output_cksum_info.prebind_cksum_command.cmdsize;
1175	}
1176	/*
1177	 * Create the uuid load command.
1178	 */
1179#ifndef KLD
1180	if(output_uuid_info.suppress != TRUE &&
1181	   (output_uuid_info.emit == TRUE ||
1182	    arch_flag.cputype == CPU_TYPE_ARM)){
1183	    output_uuid_info.uuid_command.cmd = LC_UUID;
1184	    output_uuid_info.uuid_command.cmdsize = sizeof(struct uuid_command);
1185	    uuid(&(output_uuid_info.uuid_command.uuid[0]));
1186	    ncmds++;
1187	    sizeofcmds += output_uuid_info.uuid_command.cmdsize;
1188	}
1189#else
1190	if(output_uuid_info.uuid_command.cmdsize != 0){
1191	    ncmds++;
1192	    sizeofcmds += output_uuid_info.uuid_command.cmdsize;
1193	}
1194#endif /* KLD */
1195
1196	/*
1197	 * Create the thread command if this is filetype is to have one.
1198	 */
1199	if(output_thread_info.thread_in_output == TRUE){
1200	    output_thread_info.thread_command.cmdsize =
1201						sizeof(struct thread_command) +
1202						2 * sizeof(long);
1203	    if(arch_flag.cputype == CPU_TYPE_MC680x0){
1204		output_thread_info.flavor = M68K_THREAD_STATE_REGS;
1205		output_thread_info.count = M68K_THREAD_STATE_REGS_COUNT;
1206		output_thread_info.entry_point = &(mc680x0.pc);
1207		output_thread_info.stack_pointer = &(mc680x0.areg[7]);
1208		output_thread_info.state = &mc680x0;
1209		output_thread_info.thread_command.cmdsize += sizeof(long) *
1210					    M68K_THREAD_STATE_REGS_COUNT;
1211	    }
1212	    else if(arch_flag.cputype == CPU_TYPE_POWERPC ||
1213		    arch_flag.cputype == CPU_TYPE_VEO){
1214		output_thread_info.flavor = PPC_THREAD_STATE;
1215		output_thread_info.count = PPC_THREAD_STATE_COUNT;
1216		output_thread_info.entry_point = (int *)&(powerpc.srr0);
1217		output_thread_info.stack_pointer = (int *)&(powerpc.r1);
1218		output_thread_info.state = &powerpc;
1219		output_thread_info.thread_command.cmdsize += sizeof(long) *
1220					    PPC_THREAD_STATE_COUNT;
1221	    }
1222	    else if(arch_flag.cputype == CPU_TYPE_MC88000){
1223		output_thread_info.flavor = M88K_THREAD_STATE_GRF;
1224		output_thread_info.count = M88K_THREAD_STATE_GRF_COUNT;
1225		output_thread_info.entry_point = (int *)&(mc88000.xip);
1226		output_thread_info.stack_pointer = (int *)&(mc88000.r31);
1227		output_thread_info.state = &mc88000;
1228		output_thread_info.thread_command.cmdsize += sizeof(long) *
1229					    M88K_THREAD_STATE_GRF_COUNT;
1230	    }
1231	    else if(arch_flag.cputype == CPU_TYPE_I860){
1232		output_thread_info.flavor = I860_THREAD_STATE_REGS;
1233		output_thread_info.count = I860_THREAD_STATE_REGS_COUNT;
1234		output_thread_info.entry_point = &(i860.pc);
1235		output_thread_info.stack_pointer = &(i860.ireg[0]);
1236		output_thread_info.state = &i860;
1237		output_thread_info.thread_command.cmdsize += sizeof(long) *
1238					  I860_THREAD_STATE_REGS_COUNT;
1239	    }
1240	    else if(arch_flag.cputype == CPU_TYPE_I386){
1241		output_thread_info.flavor = i386_THREAD_STATE;
1242		output_thread_info.count = i386_THREAD_STATE_COUNT;
1243		output_thread_info.entry_point = (int *)&(intel386.eip);
1244		output_thread_info.stack_pointer = (int *)&(intel386.esp);
1245		intel386.es = USER_DATA_SELECTOR;
1246		intel386.ds = USER_DATA_SELECTOR;
1247		intel386.ss = USER_DATA_SELECTOR;
1248		intel386.cs = USER_CODE_SELECTOR;
1249		output_thread_info.state = &intel386;
1250		output_thread_info.thread_command.cmdsize += sizeof(long) *
1251					    i386_THREAD_STATE_COUNT;
1252	    }
1253	    else if(arch_flag.cputype == CPU_TYPE_HPPA){
1254		output_thread_info.flavor = HPPA_FRAME_THREAD_STATE;
1255		output_thread_info.count = HPPA_FRAME_THREAD_STATE_COUNT;
1256		output_thread_info.entry_point =
1257				(int *)&(hppa_frame_state.ts_pcoq_front);
1258		output_thread_info.state = &hppa_frame_state;
1259		output_thread_info.thread_command.cmdsize += sizeof(long) *
1260					    HPPA_FRAME_THREAD_STATE_COUNT;
1261		if(stack_addr_specified == TRUE){
1262		    output_thread_info.second_flavor =
1263						HPPA_INTEGER_THREAD_STATE;
1264		    output_thread_info.second_count =
1265						HPPA_INTEGER_THREAD_STATE_COUNT;
1266		    output_thread_info.stack_pointer =
1267					(int *)&(hppa_integer_state.ts_gr30);
1268		    output_thread_info.second_state = &hppa_integer_state;
1269		    output_thread_info.thread_command.cmdsize +=
1270				sizeof(long) * HPPA_INTEGER_THREAD_STATE_COUNT +
1271				2 * sizeof(long);
1272		}
1273	    }
1274	    else if (arch_flag.cputype == CPU_TYPE_SPARC) {
1275	      output_thread_info.flavor = SPARC_THREAD_STATE_REGS;
1276	      output_thread_info.count = SPARC_THREAD_STATE_REGS_COUNT;
1277	      output_thread_info.entry_point = &(sparc_state.regs.r_pc);
1278	      output_thread_info.stack_pointer = &(sparc_state.regs.r_sp);
1279	      output_thread_info.state = &sparc_state;
1280	      output_thread_info.thread_command.cmdsize += sizeof(long) *
1281		SPARC_THREAD_STATE_REGS_COUNT;
1282	    }
1283	    else if (arch_flag.cputype == CPU_TYPE_ARM) {
1284	      output_thread_info.flavor = ARM_THREAD_STATE;
1285	      output_thread_info.count = ARM_THREAD_STATE_COUNT;
1286	      output_thread_info.entry_point = (int *)&(arm_state.__pc);
1287	      output_thread_info.stack_pointer = (int *)&(arm_state.__sp);
1288	      output_thread_info.state = &arm_state;
1289	      output_thread_info.thread_command.cmdsize += sizeof(long) *
1290		ARM_THREAD_STATE_COUNT;
1291	    }
1292	    else{
1293		fatal("internal error: layout_segments() called with unknown "
1294		      "cputype (%d) set", arch_flag.cputype);
1295	    }
1296	    sizeofcmds += output_thread_info.thread_command.cmdsize;
1297	    ncmds++;
1298	}
1299
1300	/*
1301	 * Create the routines command if this is filetype is to have one.
1302	 */
1303	if(output_routines_info.routines_in_output == TRUE){
1304	    sizeofcmds += output_routines_info.routines_command.cmdsize;
1305	    ncmds++;
1306	}
1307
1308	/*
1309	 * Fill in the mach_header for the output file.
1310	 */
1311	output_mach_header.magic = MH_MAGIC;
1312	output_mach_header.cputype = arch_flag.cputype;
1313	output_mach_header.cpusubtype = arch_flag.cpusubtype;
1314	output_mach_header.filetype = filetype;
1315	output_mach_header.ncmds = ncmds;
1316	output_mach_header.sizeofcmds = sizeofcmds;
1317	output_mach_header.flags = 0;
1318	if(base_obj != NULL)
1319	    output_mach_header.flags |= MH_INCRLINK;
1320	if(output_for_dyld){
1321	    output_mach_header.flags |= MH_DYLDLINK;
1322	    if(bind_at_load)
1323		output_mach_header.flags |= MH_BINDATLOAD;
1324	    if(segs_read_only_addr_specified)
1325		output_mach_header.flags |= MH_SPLIT_SEGS;
1326	    if(twolevel_namespace)
1327		output_mach_header.flags |= MH_TWOLEVEL;
1328	    if(force_flat_namespace)
1329		output_mach_header.flags |= MH_FORCE_FLAT;
1330	    if(nomultidefs)
1331		output_mach_header.flags |= MH_NOMULTIDEFS;
1332	    if(no_fix_prebinding)
1333		output_mach_header.flags |= MH_NOFIXPREBINDING;
1334	}
1335	if(some_non_subsection_via_symbols_objects == FALSE)
1336	    output_mach_header.flags |= MH_SUBSECTIONS_VIA_SYMBOLS;
1337	if(allow_stack_execute == TRUE)
1338	    output_mach_header.flags |= MH_ALLOW_STACK_EXECUTION;
1339
1340	/*
1341	 * The total headers size needs to be known in the case of MH_EXECUTE,
1342	 * MH_BUNDLE, MH_FVMLIB, MH_DYLIB and MH_DYLINKER format file types
1343	 * because their headers get loaded as part of of the first segment.
1344	 * For the MH_FVMLIB and MH_DYLINKER file types the headers are placed
1345	 * on their own page or pages (the size of the segment alignment).
1346	 */
1347	headers_size = sizeof(struct mach_header) + sizeofcmds;
1348	if(filetype == MH_FVMLIB){
1349	    if(headers_size > segalign)
1350		fatal("size of headers (0x%x) exceeds the segment alignment "
1351		      "(0x%x) (would cause the addresses not to be fixed)",
1352		      (unsigned int)headers_size, (unsigned int)segalign);
1353	    headers_size = segalign;
1354	}
1355	else if(filetype == MH_DYLINKER){
1356	    headers_size = rnd(headers_size, segalign);
1357	}
1358
1359	/*
1360	 * For MH_EXECUTE, MH_BUNDLE, and MH_DYLIB formats the as much of the
1361	 * segment padding that can be is moved to the begining of the segment
1362	 * just after the headers.  This is done so that the headers could
1363	 * added to by a smart program like segedit(1) some day.
1364	 */
1365	if(filetype == MH_EXECUTE ||
1366	   filetype == MH_BUNDLE ||
1367	   filetype == MH_DYLIB){
1368	    if(first_msg != NULL){
1369		size = 0;
1370		content = &(first_msg->content_sections);
1371		if(*content){
1372		    max_first_align = 1 << (*content)->s.align;
1373		    while(*content){
1374			ms = *content;
1375			if((unsigned long)(1 << ms->s.align) > segalign)
1376			    error("alignment (0x%x) of section (%.16s,%.16s) "
1377				  "greater than the segment alignment (0x%x)",
1378				  (unsigned int)(1 << ms->s.align),
1379				  ms->s.segname, ms->s.sectname,
1380				  (unsigned int)segalign);
1381			size = rnd(size, 1 << ms->s.align);
1382			if((unsigned long)(1 << ms->s.align) > max_first_align)
1383			    max_first_align = 1 << ms->s.align;
1384			size += ms->s.size;
1385			content = &(ms->next);
1386		    }
1387		    if(errors == 0){
1388			pad = ((rnd(size + rnd(headers_size,
1389				      max_first_align), segalign) -
1390			       (size + rnd(headers_size, max_first_align))) /
1391				 max_first_align) * max_first_align;
1392			if(pad > headerpad)
1393			    headerpad = pad;
1394			headers_size += headerpad;
1395		    }
1396		}
1397	    }
1398	}
1399
1400	/*
1401	 * Assign the section addresses relitive to the start of their segment
1402	 * to accumulate the file and vm sizes of the segments.
1403	 */
1404	max_align = 1;
1405	p = &merged_segments;
1406	while(*p){
1407	    msg = *p;
1408	    if(msg != &pagezero_segment &&
1409	       msg != &linkedit_segment &&
1410	       msg != &stack_segment){
1411		if(msg == first_msg &&
1412		   (filetype == MH_EXECUTE ||
1413		    filetype == MH_BUNDLE ||
1414		    filetype == MH_FVMLIB ||
1415		    filetype == MH_DYLIB ||
1416		    filetype == MH_DYLINKER))
1417		    addr = headers_size;
1418		else
1419		    addr = 0;
1420		content = &(msg->content_sections);
1421		while(*content){
1422		    ms = *content;
1423		    if((unsigned long)(1 << ms->s.align) > segalign)
1424			error("alignment (0x%x) of section (%.16s,%.16s) "
1425			      "greater than the segment alignment (0x%x)",
1426			      (unsigned int)(1 << ms->s.align), ms->s.segname,
1427			      ms->s.sectname, (unsigned int)segalign);
1428		    if((unsigned long)(1 << ms->s.align) > max_align)
1429			max_align = 1 << ms->s.align;
1430		    addr = rnd(addr, 1 << ms->s.align);
1431		    ms->s.addr = addr;
1432		    addr += ms->s.size;
1433		    content = &(ms->next);
1434		}
1435		if(msg == &object_segment)
1436		    msg->sg.filesize = addr;
1437		else
1438		    msg->sg.filesize = rnd(addr, segalign);
1439		zerofill = &(msg->zerofill_sections);
1440		while(*zerofill){
1441		    ms = *zerofill;
1442		    if((unsigned long)(1 << ms->s.align) > segalign)
1443			error("alignment (0x%x) of section (%.16s,%.16s) "
1444			      "greater than the segment alignment (0x%x)",
1445			      (unsigned int)(1 << ms->s.align), ms->s.segname,
1446			      ms->s.sectname, (unsigned int)segalign);
1447		    if((unsigned long)(1 << ms->s.align) > max_align)
1448			max_align = 1 << ms->s.align;
1449		    addr = rnd(addr, 1 << ms->s.align);
1450		    ms->s.addr = addr;
1451		    addr += ms->s.size;
1452		    zerofill = &(ms->next);
1453		}
1454		if(msg == &object_segment)
1455		    msg->sg.vmsize = addr;
1456		else
1457		    msg->sg.vmsize = rnd(addr, segalign);
1458	    }
1459	    p = &(msg->next);
1460	}
1461
1462#ifdef RLD
1463	/*
1464	 * For rld() the output format is MH_OBJECT and the contents of the
1465	 * first segment (the entire vmsize not just the filesize), if it exists,
1466	 * plus headers are allocated and the address the segment is linked to
1467	 * is the address of this memory.
1468	 */
1469	output_size = 0;
1470
1471	headers_size = rnd(headers_size, max_align);
1472	output_size = headers_size;
1473	if(first_msg != NULL)
1474	    output_size += first_msg->sg.vmsize;
1475	allocate_size = output_size;
1476	if(strip_level != STRIP_ALL)
1477	    allocate_size += output_symtab_info.symtab_command.nsyms *
1478				sizeof(struct nlist) +
1479				output_symtab_info.symtab_command.strsize;
1480
1481#ifdef SA_RLD
1482	if(allocate_size > sa_rld_output_size)
1483	    fatal("not enough memory for output of size %lu (memory "
1484		    "available %lu)", allocate_size, sa_rld_output_size);
1485	output_addr = sa_rld_output_addr;
1486#else /* !defined(SA_RLD) */
1487	if((r = vm_allocate(mach_task_self(), (vm_address_t *)&output_addr,
1488			    allocate_size, TRUE)) != KERN_SUCCESS)
1489	    mach_fatal(r, "can't vm_allocate() memory for output of size "
1490			"%lu", allocate_size);
1491	/*
1492	 * The default initial protection for vm_allocate()'ed memory
1493	 * may not include VM_PROT_EXECUTE so we need to raise the
1494	 * the protection to VM_PROT_ALL which include this.
1495	 */
1496	if((r = vm_protect(mach_task_self(), (vm_address_t)output_addr,
1497	    allocate_size, FALSE, VM_PROT_ALL)) != KERN_SUCCESS)
1498	    mach_fatal(r, "can't set vm_protection on memory for output");
1499#endif /* defined(SA_RLD) */
1500#ifdef RLD_VM_ALLOC_DEBUG
1501	print("rld() vm_allocate: addr = 0x%0x size = 0x%x\n",
1502		(unsigned int)output_addr, (unsigned int)allocate_size);
1503#endif /* RLD_VM_ALLOC_DEBUG */
1504	sets[cur_set].output_addr = output_addr;
1505	sets[cur_set].output_size = output_size;
1506
1507	if(first_msg != NULL){
1508	    if(address_func != NULL){
1509	        if(RLD_DEBUG_OUTPUT_FILENAME_flag)
1510		    first_msg->sg.vmaddr =
1511		      (*address_func)(allocate_size, headers_size)+headers_size;
1512		else
1513		    first_msg->sg.vmaddr =
1514		      (*address_func)(output_size, headers_size) + headers_size;
1515	    }
1516	    else
1517		first_msg->sg.vmaddr = (long)output_addr + headers_size;
1518	}
1519#endif /* RLD */
1520	/*
1521	 * Set the addresses of segments that have not had their addresses set
1522	 * and set the addresses of all sections (previously set relitive to the
1523	 * start of their section and here just moved by the segment address).
1524	 * The addresses of segments that are not set are set to the next
1525	 * available address after the first segment that the vmsize will fit
1526	 * (note that the address of the first segment has been set above).
1527	 */
1528	p = &merged_segments;
1529	while(*p){
1530	    msg = *p;
1531	    /*
1532	     * The first segment has had it's address set previouly above so
1533	     * this test will always fail for it and there is no problem of
1534	     * trying to use first_msg for the first segment.
1535	     */
1536	    if(msg->addr_set == FALSE){
1537		if(segs_read_only_addr_specified){
1538		    if((msg->sg.initprot & VM_PROT_WRITE) == 0)
1539			msg->sg.vmaddr = next_vmaddr(segs_read_only_addr,
1540						     msg->sg.vmsize);
1541		    else
1542			msg->sg.vmaddr = next_vmaddr(segs_read_write_addr,
1543						     msg->sg.vmsize);
1544		}
1545		else{
1546		    msg->sg.vmaddr = next_vmaddr(first_msg->sg.vmaddr,
1547						 msg->sg.vmsize);
1548		}
1549		msg->addr_set = TRUE;
1550	    }
1551	    if(msg != &pagezero_segment &&
1552	       msg != &linkedit_segment &&
1553	       msg != &stack_segment){
1554		content = &(msg->content_sections);
1555		while(*content){
1556		    ms = *content;
1557		    ms->s.addr += msg->sg.vmaddr;
1558		    content = &(ms->next);
1559		}
1560		zerofill = &(msg->zerofill_sections);
1561		while(*zerofill){
1562		    ms = *zerofill;
1563		    ms->s.addr += msg->sg.vmaddr;
1564		    zerofill = &(ms->next);
1565		}
1566	    }
1567	    p = &(msg->next);
1568	}
1569
1570#ifndef RLD
1571	/*
1572	 * Check for overlapping segments (including fvmlib segments).
1573	 */
1574	check_for_overlapping_segments(&linkedit_segment);
1575
1576	/*
1577	 * If prebinding check to see the that the lazy pointer relocation
1578	 * entries are not too far away to fit into the 24-bit r_adderess field
1579	 * of a scattered relocation entry.
1580	 */
1581	if(prebinding)
1582	    check_for_lazy_pointer_relocs_too_far();
1583
1584	if(prebinding)
1585	    output_mach_header.flags |= MH_PREBOUND;
1586#endif /* RLD */
1587
1588	/*
1589	 * Assign all file offsets.  Things with offsets appear in the following
1590	 * two possible orders in the file:
1591	 *
1592	 *   For relocatable objects (not output for dyld)
1593	 *	The segments (and their content sections)
1594	 *	The relocation entries for the content sections
1595	 *	The symbol table
1596	 *	The string table.
1597	 *
1598	 *   For objects output for dyld
1599	 *	The segments (and their content sections)
1600	 *	The local relocation entries
1601	 *	The symbol table
1602	 *		local symbols
1603	 *		external defined symbols
1604	 *		undefined symbols
1605	 *	The two-level namespace hints table
1606	 *	The external relocation entries
1607	 *	The indirect symbol table
1608	 *	The table of contents (MH_DYLIB only)
1609	 *	The module table (MH_DYLIB only)
1610	 *	The reference table (MH_DYLIB only)
1611	 *	The string table.
1612	 *		external strings
1613	 *		local strings
1614	 */
1615	offset = headers_size;
1616	/* set the offset to the segments and sections */
1617	p = &merged_segments;
1618	while(*p){
1619	    msg = *p;
1620	    if(msg != &pagezero_segment &&
1621	       msg != &linkedit_segment &&
1622	       msg != &stack_segment){
1623		if(msg == first_msg &&
1624		   (filetype == MH_EXECUTE ||
1625		    filetype == MH_BUNDLE ||
1626		    filetype == MH_FVMLIB ||
1627		    filetype == MH_DYLIB ||
1628		    filetype == MH_DYLINKER)){
1629		    msg->sg.fileoff = 0;
1630		    content = &(msg->content_sections);
1631		    if(*content){
1632			ms = *content;
1633			offset = ms->s.addr - msg->sg.vmaddr;
1634		    }
1635		}
1636		else
1637		    msg->sg.fileoff = offset;
1638		content = &(msg->content_sections);
1639		while(*content){
1640		    ms = *content;
1641		    ms->s.offset = offset;
1642		    if(ms->next != NULL)
1643			offset += (ms->next->s.addr - ms->s.addr);
1644		    content = &(ms->next);
1645		}
1646		if(msg->sg.filesize == 0)
1647		    msg->sg.fileoff = 0;
1648		if(msg == first_msg &&
1649		   (filetype == MH_EXECUTE ||
1650		    filetype == MH_BUNDLE ||
1651		    filetype == MH_FVMLIB ||
1652		    filetype == MH_DYLIB ||
1653		    filetype == MH_DYLINKER))
1654		    offset = msg->sg.filesize;
1655		else
1656		    if(msg->sg.filesize != 0)
1657			offset = msg->sg.fileoff + msg->sg.filesize;
1658	    }
1659	    p = &(msg->next);
1660	}
1661
1662	/*
1663	 * The offsets to all the link edit structures in the file must be on
1664	 * boundaries that they can be mapped into memory and then used as is.
1665	 * The maximum alignment of all structures in a Mach-O file is
1666	 * sizeof(long) so the offset must be rounded to this as the sections
1667	 * and segments may not be rounded to this.
1668	 */
1669	offset = rnd(offset, sizeof(long));
1670#ifdef RLD
1671	/*
1672	 * For RLD if there is any symbol table it is written past the size
1673	 * of the output_size.  Room has been allocated for it above if the
1674	 * strip_level != STRIP_ALL.
1675	 */
1676	offset = output_size;
1677#endif /* RLD */
1678
1679	/* the linkedit segment will start here */
1680	linkedit_segment.sg.fileoff = offset;
1681
1682	/* set the offset to the relocation entries (if in the output file) */
1683	p = &merged_segments;
1684	while(*p){
1685	    msg = *p;
1686	    content = &(msg->content_sections);
1687	    while(*content){
1688		ms = *content;
1689		if(save_reloc && ms->s.nreloc != 0){
1690		    ms->s.reloff = offset;
1691		    offset += ms->s.nreloc * sizeof(struct relocation_info);
1692		}
1693		else{
1694		    ms->s.reloff = 0;
1695		    ms->s.nreloc = 0;
1696		}
1697		content = &(ms->next);
1698	    }
1699	    p = &(msg->next);
1700	}
1701	if(output_for_dyld){
1702	    if(output_dysymtab_info.dysymtab_command.nlocrel != 0){
1703		output_dysymtab_info.dysymtab_command.locreloff = offset;
1704		offset += output_dysymtab_info.dysymtab_command.nlocrel *
1705			  sizeof(struct relocation_info);
1706	    }
1707	}
1708	/* set the offset to the symbol table (output for dyld case) */
1709	if(output_for_dyld){
1710	    if(strip_level != STRIP_ALL){
1711		output_symtab_info.symtab_command.symoff = offset;
1712		offset += output_symtab_info.symtab_command.nsyms *
1713			  sizeof(struct nlist);
1714	    }
1715	}
1716	/* set the offset to the two-level namespace hints */
1717	if(output_for_dyld && twolevel_namespace == TRUE &&
1718	   twolevel_namespace_hints == TRUE){
1719	    output_hints_info.twolevel_hints_command.offset = offset;
1720	    offset += output_hints_info.twolevel_hints_command.nhints *
1721		      sizeof(struct twolevel_hint);
1722	}
1723	if(output_for_dyld){
1724	    if(output_dysymtab_info.dysymtab_command.nextrel != 0){
1725		output_dysymtab_info.dysymtab_command.extreloff = offset;
1726		offset += output_dysymtab_info.dysymtab_command.nextrel *
1727			  sizeof(struct relocation_info);
1728	    }
1729	}
1730	/* set the offset to the indirect symbol table */
1731	if(nindirectsyms != 0){
1732	    output_dysymtab_info.dysymtab_command.indirectsymoff = offset;
1733	    offset += nindirectsyms * sizeof(unsigned long);
1734	}
1735#ifndef RLD
1736	/* set the offset to the dylib tables */
1737	if(filetype == MH_DYLIB){
1738	    output_dysymtab_info.dysymtab_command.tocoff = offset;
1739	    offset += output_dysymtab_info.dysymtab_command.ntoc *
1740		      sizeof(struct dylib_table_of_contents);
1741	    output_dysymtab_info.dysymtab_command.modtaboff = offset;
1742	    offset += output_dysymtab_info.dysymtab_command.nmodtab *
1743		      sizeof(struct dylib_module);
1744	    output_dysymtab_info.dysymtab_command.extrefsymoff = offset;
1745	    offset += output_dysymtab_info.dysymtab_command.nextrefsyms *
1746		      sizeof(struct dylib_reference);
1747	}
1748#endif /* !defined(RLD) */
1749	/* set the offset to the symbol table (output not for dyld case) */
1750	if(output_for_dyld == FALSE){
1751	    if(strip_level != STRIP_ALL){
1752		output_symtab_info.symtab_command.symoff = offset;
1753		offset += output_symtab_info.symtab_command.nsyms *
1754			  sizeof(struct nlist);
1755	    }
1756	}
1757	/* set the offset to the string table */
1758	if(strip_level != STRIP_ALL){
1759	    output_symtab_info.symtab_command.stroff = offset;
1760	    offset += output_symtab_info.symtab_command.strsize;
1761	}
1762#ifndef RLD
1763	/* set the size of the output file */
1764	output_size = offset;
1765#endif /* !defined(RLD) */
1766
1767	/*
1768	 * Set the output section number in to each merged section to be used
1769	 * to set symbol's section numbers and local relocation section indexes.
1770	 */
1771	i = 1;
1772	p = &merged_segments;
1773	while(*p){
1774	    msg = *p;
1775	    content = &(msg->content_sections);
1776	    while(*content){
1777		ms = *content;
1778		ms->output_sectnum = i++;
1779		content = &(ms->next);
1780	    }
1781	    zerofill = &(msg->zerofill_sections);
1782	    while(*zerofill){
1783		ms = *zerofill;
1784		ms->output_sectnum = i++;
1785		zerofill = &(ms->next);
1786	    }
1787	    p = &(msg->next);
1788	}
1789	if(i > MAX_SECT)
1790	    fatal("too many sections used, maximum is: %d", MAX_SECT);
1791
1792#ifndef RLD
1793	/*
1794	 * Define the loader defined symbols.  This is done here because the
1795	 * address of the headers is needed to defined the symbol for
1796	 * MH_EXECUTE, MH_BUNDLE, MH_DYLIB and MH_DYLINKER filetypes.
1797	 */
1798	if(filetype == MH_EXECUTE &&
1799	   first_msg != NULL && first_msg != &linkedit_segment)
1800	    define_link_editor_execute_symbols(first_msg->sg.vmaddr);
1801	if((filetype == MH_BUNDLE ||
1802	    filetype == MH_DYLIB || filetype == MH_DYLINKER) &&
1803	   first_msg != NULL && first_msg != &linkedit_segment)
1804	    define_link_editor_dylib_symbols(first_msg->sg.vmaddr);
1805	if(filetype == MH_PRELOAD)
1806	    define_link_editor_preload_symbols(FALSE);
1807#endif /* !defined(RLD) */
1808
1809	/*
1810	 * Now with the addresses of the segments and sections set and the
1811	 * sections numbered for the output file set the values and section
1812	 * numbers of the merged symbols.
1813	 */
1814	layout_merged_symbols();
1815
1816	/*
1817	 * Set the entry point to either the specified symbol name's value or
1818	 * the address of the first section.
1819	 */
1820	if(output_thread_info.thread_in_output == TRUE){
1821	    if(entry_point_name != NULL){
1822		merged_symbol = lookup_symbol(entry_point_name);
1823		/*
1824		 * If the symbol is not found, undefined or common the
1825		 * entry point can't be set.
1826		 */
1827		if(merged_symbol->name_len == 0 ||
1828		   merged_symbol->nlist.n_type == (N_EXT | N_UNDF))
1829		    fatal("entry point symbol name: %s not defined",
1830			  entry_point_name);
1831		*output_thread_info.entry_point = merged_symbol->nlist.n_value;
1832	    }
1833	    else{
1834		*output_thread_info.entry_point =
1835					    first_msg->content_sections->s.addr;
1836	    }
1837	    /*
1838	     * Set up wierd hppa instruction address offset queue.
1839	     * iioq_head is the entry point.  iioq_tail is the next instruction.
1840	     */
1841	    if(arch_flag.cputype == CPU_TYPE_HPPA)
1842		hppa_frame_state.ts_pcoq_back =
1843			hppa_frame_state.ts_pcoq_front + 4;
1844	    /*
1845	     * If the stack address is specified set this in the stack pointer.
1846	     */
1847	    if(stack_addr_specified == TRUE &&
1848	       output_thread_info.thread_command.cmd == LC_UNIXTHREAD)
1849		*output_thread_info.stack_pointer = stack_addr;
1850	}
1851	else{
1852	    if(entry_point_name != NULL)
1853		warning("specified entry point symbol name ignored, output "
1854 			"file type has no entry point or no non-zerofill "
1855 			"sections");
1856	}
1857
1858	/*
1859	 * Set the addresses and module indexes in the routine command if that
1860	 * is to appear in the output.
1861	 */
1862	if(output_routines_info.routines_in_output == TRUE){
1863	    if(init_name != NULL){
1864		merged_symbol = lookup_symbol(init_name);
1865		/*
1866		 * If the symbol is not found, undefined or common the
1867		 * initialization routine address can't be set.
1868		 */
1869		if(merged_symbol->name_len == 0 ||
1870		   merged_symbol->nlist.n_type == (N_EXT | N_UNDF))
1871		    fatal("initialization routine symbol name: %s not defined",
1872			  init_name);
1873		if(arch_flag.cputype == CPU_TYPE_ARM &&
1874		   (merged_symbol->nlist.n_desc & N_ARM_THUMB_DEF))
1875		    /* Have to set the low-order bit if symbol is Thumb */
1876		    output_routines_info.routines_command.init_address =
1877			merged_symbol->nlist.n_value | 1;
1878		else
1879		    output_routines_info.routines_command.init_address =
1880			merged_symbol->nlist.n_value;
1881		output_routines_info.routines_command.init_module =
1882		    merged_symbol->definition_object->imodtab;
1883	    }
1884	}
1885	else{
1886	    if(init_name != NULL)
1887		warning("specified initialization routine symbol name ignored, "
1888			"output file type has no initialization routine");
1889	}
1890}
1891
1892/*
1893 * next_vmaddr() is passed in a vmaddr and vmsize and returns then next highest
1894 * vmaddr that will fit the vmsize in the merged segments which have addresses
1895 * assigned.
1896 */
1897static
1898unsigned long
1899next_vmaddr(
1900unsigned long vmaddr,
1901unsigned long vmsize)
1902{
1903    unsigned long i, n;
1904    struct merged_segment *msg, **sorted_merged_segments;
1905
1906	/*
1907	 * Count the of merged segments with addresses set of non-zero size.
1908	 */
1909	n = 0;
1910	for(msg = merged_segments; msg != NULL ; msg = msg->next){
1911	    if(msg->addr_set == TRUE && msg->sg.vmsize != 0)
1912		n++;
1913	}
1914	/*
1915	 * If no merged segment of non-zero size has an address set return the
1916	 * vmaddr passed in.
1917	 */
1918	if(n == 0)
1919	    return(vmaddr);
1920
1921	/*
1922	 * Create a list of merged segments sorted by vmaddr for the merged
1923	 * segments with addresses set of non-zero size.
1924	 */
1925	sorted_merged_segments = (struct merged_segment **)
1926				 allocate(n * sizeof(struct merged_segment *));
1927	i = 0;
1928	for(msg = merged_segments; msg != NULL ; msg = msg->next){
1929	    if(msg->addr_set == TRUE && msg->sg.vmsize != 0){
1930		sorted_merged_segments[i] = msg;
1931		i++;
1932	    }
1933	}
1934	qsort(sorted_merged_segments, n, sizeof(struct merged_segment *),
1935	      (int (*)(const void *, const void *))qsort_vmaddr);
1936
1937	/*
1938	 * Find the next highest address from the vmaddr passed in that will fit
1939	 * the vmsize passed in.  This may wrap around to lower addresses.  Also
1940	 * if all address space is taken this will return a address that
1941	 * overlaps.
1942	 */
1943	for(i = 0; i < n; i++){
1944	    if(vmaddr < sorted_merged_segments[i]->sg.vmaddr){
1945		if(vmaddr + vmsize <= sorted_merged_segments[i]->sg.vmaddr)
1946		    goto done;
1947		vmaddr = sorted_merged_segments[i]->sg.vmaddr +
1948			 sorted_merged_segments[i]->sg.vmsize;
1949	    }
1950	    if(vmaddr < sorted_merged_segments[i]->sg.vmaddr +
1951			sorted_merged_segments[i]->sg.vmsize){
1952		vmaddr = sorted_merged_segments[i]->sg.vmaddr +
1953			 sorted_merged_segments[i]->sg.vmsize;
1954	    }
1955	}
1956done:
1957	free(sorted_merged_segments);
1958	return(vmaddr);
1959}
1960
1961/*
1962 * Function for qsort to sort merged_segments by their vmaddr.
1963 */
1964static
1965int
1966qsort_vmaddr(
1967const struct merged_segment **msg1,
1968const struct merged_segment **msg2)
1969{
1970	if((*msg1)->sg.vmaddr < (*msg2)->sg.vmaddr)
1971	    return(-1);
1972	if((*msg1)->sg.vmaddr == (*msg2)->sg.vmaddr)
1973	    return(0);
1974	/* (*msg1)->sg.vmaddr > (*msg2)->sg.vmaddr */
1975	    return(1);
1976}
1977
1978#ifndef RLD
1979/*
1980 * check_reserved_segment() checks to that the reserved segment is NOT in the
1981 * merged segment list and returns TRUE if so.  If it the segment is found in
1982 * the merged segment list it prints an error message stating the segment exists
1983 * and prints the string passed to it as to why this segment is reserved.  Then
1984 * It prints all sections created from files in that segment and all object
1985 * files that contain that segment.  Finally it returns FALSE in this case.
1986 */
1987static
1988enum bool
1989check_reserved_segment(
1990char *segname,
1991char *reserved_error_string)
1992{
1993    struct merged_segment *msg;
1994    struct merged_section **content, *ms;
1995
1996    unsigned long i, j;
1997    struct object_list *object_list, **q;
1998    struct object_file *object_file;
1999    struct section *s;
2000
2001	msg = lookup_merged_segment(segname);
2002	if(msg != NULL){
2003	    error("segment %s exist in the output file (%s)", segname,
2004		  reserved_error_string);
2005	    /*
2006	     * Loop through the content sections and report any sections
2007	     * created from files.
2008	     */
2009	    content = &(msg->content_sections);
2010	    while(*content){
2011		ms = *content;
2012		if(ms->contents_filename != NULL)
2013		    print("section (%.16s,%.16s) created from file "
2014			   "%s\n", ms->s.segname, ms->s.sectname,
2015			   ms->contents_filename);
2016		content = &(ms->next);
2017	    }
2018	    /*
2019	     * Loop through all the objects and report those that have
2020	     * this segment.
2021	     */
2022	    for(q = &objects; *q; q = &(object_list->next)){
2023		object_list = *q;
2024		for(i = 0; i < object_list->used; i++){
2025		    object_file = &(object_list->object_files[i]);
2026		    if(object_file == base_obj)
2027			continue;
2028		    if(object_file->dylib)
2029			continue;
2030		    if(object_file->bundle_loader)
2031			continue;
2032		    if(object_file->dylinker)
2033			continue;
2034		    for(j = 0; j < object_file->nsection_maps; j++){
2035			s = object_file->section_maps[j].s;
2036			if(strcmp(s->segname, segname) == 0){
2037			    print_obj_name(object_file);
2038			    print("contains section (%.16s,%.16s)\n",
2039				   s->segname, s->sectname);
2040			}
2041		    }
2042		}
2043	    }
2044	    return(FALSE);
2045	}
2046	return(TRUE);
2047}
2048
2049/*
2050 * check_for_overlapping_segments() checks for overlapping segments in the
2051 * output file and in the segments from the fixed VM shared libraries it uses.
2052 */
2053static
2054void
2055check_for_overlapping_segments(
2056struct merged_segment *outputs_linkedit_segment)
2057{
2058    struct merged_segment **p1, **p2, **last_merged, **last_fvmseg, **last_base,
2059			  *msg1, *msg2;
2060
2061	/*
2062	 * To make the checking loops below clean the fvmlib segment list is
2063	 * attached to the end of the merged segment list and then detached
2064	 * before we return.
2065	 */
2066	last_merged = &merged_segments;
2067	while(*last_merged){
2068	    msg1 = *last_merged;
2069	    last_merged = &(msg1->next);
2070	}
2071	if(fvmlib_segments != NULL){
2072	    *last_merged = fvmlib_segments;
2073
2074	    last_fvmseg = &fvmlib_segments;
2075	    while(*last_fvmseg){
2076		msg1 = *last_fvmseg;
2077		last_fvmseg = &(msg1->next);
2078	    }
2079	}
2080	else
2081	    last_fvmseg = last_merged;
2082	*last_fvmseg = base_obj_segments;
2083
2084	p1 = &merged_segments;
2085	while(*p1){
2086	    msg1 = *p1;
2087	    p2 = &(msg1->next);
2088	    while(*p2){
2089		msg2 = *p2;
2090		check_overlap(msg1, msg2, FALSE, outputs_linkedit_segment);
2091		p2 = &(msg2->next);
2092	    }
2093	    p1 = &(msg1->next);
2094	}
2095
2096	/*
2097	 * If we are doing prebinding add the segments from the dylibs and
2098	 * then check for overlap with these segments.
2099	 */
2100	if(prebinding && dylib_segments != NULL){
2101	    /* first add on the dylib segments */
2102	    last_base = last_fvmseg;
2103	    while(*last_base){
2104		msg1 = *last_base;
2105		last_base = &(msg1->next);
2106	    }
2107	    *last_base = dylib_segments;
2108	    /* now check overlap for prebinding */
2109	    p1 = &merged_segments;
2110	    while(*p1){
2111		msg1 = *p1;
2112		p2 = &(msg1->next);
2113		while(*p2){
2114		    msg2 = *p2;
2115		    check_overlap(msg1, msg2, TRUE, outputs_linkedit_segment);
2116		    p2 = &(msg2->next);
2117		}
2118		p1 = &(msg1->next);
2119	    }
2120	    /* take off the dylib segments */
2121	    *last_base = NULL;
2122	}
2123
2124	/* take off the fvmlib segments */
2125	*last_merged = NULL;
2126
2127	/* take off the base_obj segments */
2128	*last_fvmseg = NULL;
2129}
2130
2131/*
2132 * check_overlap() checks if the two segments passed to it overlap and if so
2133 * prints an error message if prebind_check is FALSE.  If prebind_check is
2134 * TRUE it prints a warning that prebind is will be disabled.  In either case
2135 * prebinding is disabled if there is an overlap.
2136 */
2137static
2138void
2139check_overlap(
2140struct merged_segment *msg1,
2141struct merged_segment *msg2,
2142enum bool prebind_check,
2143struct merged_segment *outputs_linkedit_segment)
2144{
2145    char *not;
2146
2147	if(msg1->sg.vmsize == 0 || msg2->sg.vmsize == 0)
2148	    return;
2149
2150	if(msg1->sg.vmaddr > msg2->sg.vmaddr){
2151	    if(msg2->sg.vmaddr + msg2->sg.vmsize <= msg1->sg.vmaddr)
2152		return;
2153	}
2154	else{
2155	    if(msg1->sg.vmaddr + msg1->sg.vmsize <= msg2->sg.vmaddr)
2156		return;
2157	}
2158	if(prebind_check == FALSE)
2159	    error("%.16s segment (address = 0x%x size = 0x%x) of %s overlaps "
2160		  "with %.16s segment (address = 0x%x size = 0x%x) of %s",
2161		  msg1->sg.segname, (unsigned int)(msg1->sg.vmaddr),
2162		  (unsigned int)(msg1->sg.vmsize), msg1->filename,
2163		  msg2->sg.segname, (unsigned int)(msg2->sg.vmaddr),
2164		  (unsigned int)(msg2->sg.vmsize), msg2->filename);
2165	else{
2166	    if((segs_read_only_addr_specified &&
2167		((msg1 == outputs_linkedit_segment &&
2168			msg2->split_dylib == TRUE)||
2169	         (msg2 == outputs_linkedit_segment &&
2170			msg1->split_dylib == TRUE))) ||
2171		 (msg1->split_dylib == TRUE &&
2172			strcmp(msg1->sg.segname, SEG_LINKEDIT) == 0) ||
2173		 (msg2->split_dylib == TRUE &&
2174			strcmp(msg2->sg.segname, SEG_LINKEDIT) == 0)){
2175		warning("prebinding not disabled even though (%.16s segment "
2176			"(address = 0x%x size = 0x%x) of %s overlaps with "
2177			"%.16s segment (address = 0x%x size = 0x%x) of %s on "
2178			"the assumption that the stripped output will not "
2179			"overlap",
2180			msg1->sg.segname, (unsigned int)(msg1->sg.vmaddr),
2181			(unsigned int)(msg1->sg.vmsize), msg1->filename,
2182			msg2->sg.segname, (unsigned int)(msg2->sg.vmaddr),
2183			(unsigned int)(msg2->sg.vmsize), msg2->filename);
2184		return;
2185	    }
2186	    if(prebind_allow_overlap == TRUE)
2187		not = " not";
2188	    else
2189		not = "";
2190	    warning("prebinding%s disabled because (%.16s segment (address = "
2191		    "0x%x size = 0x%x) of %s overlaps with %.16s segment "
2192		    "(address = 0x%x size = 0x%x) of %s", not,
2193		    msg1->sg.segname, (unsigned int)(msg1->sg.vmaddr),
2194		    (unsigned int)(msg1->sg.vmsize), msg1->filename,
2195		    msg2->sg.segname, (unsigned int)(msg2->sg.vmaddr),
2196		    (unsigned int)(msg2->sg.vmsize), msg2->filename);
2197	    if(prebind_allow_overlap == TRUE)
2198		return;
2199	    if(ld_trace_prebinding_disabled == TRUE)
2200	      ld_trace("[Logging for XBS] prebinding disabled "
2201		       "for %s because (%.16s segment (address = 0x%x size = "
2202		       "0x%x) of %s overlaps with %.16s segment (address = 0x%x "
2203		       "size = 0x%x) of %s\n", final_output != NULL ?
2204		       final_output : outputfile,
2205		       msg1->sg.segname, (unsigned int)(msg1->sg.vmaddr),
2206		       (unsigned int)(msg1->sg.vmsize), msg1->filename,
2207		       msg2->sg.segname, (unsigned int)(msg2->sg.vmaddr),
2208		       (unsigned int)(msg2->sg.vmsize), msg2->filename);
2209	}
2210	prebinding = FALSE;
2211}
2212
2213/*
2214 * check_for_lazy_pointer_relocs_too_far() is call when prebinding is TRUE and
2215 * checks to see that the lazy pointer's will not be too far away to overflow
2216 * the the 24-bit r_address field of a scattered relocation entry.  If so
2217 * then prebinding will be disabled.
2218 */
2219static
2220void
2221check_for_lazy_pointer_relocs_too_far(
2222void)
2223{
2224    struct merged_segment **p, *msg;
2225    struct merged_section **content, *ms;
2226    unsigned long base_addr;
2227
2228	if(segs_read_only_addr_specified == TRUE)
2229	    base_addr = segs_read_write_addr;
2230	else
2231	    base_addr = merged_segments->sg.vmaddr;
2232
2233	p = &merged_segments;
2234	while(*p && prebinding){
2235	    msg = *p;
2236	    content = &(msg->content_sections);
2237	    while(*content && prebinding){
2238		ms = *content;
2239		if((ms->s.flags & SECTION_TYPE) ==
2240		   S_LAZY_SYMBOL_POINTERS){
2241		    if(((ms->s.addr + ms->s.size) - base_addr) & 0xff000000){
2242			warning("prebinding disabled because output is too "
2243				"large (limitation of the 24-bit r_address "
2244				"field of scattered relocation entries)");
2245			prebinding = FALSE;
2246		    }
2247		}
2248		content = &(ms->next);
2249	    }
2250	    p = &(msg->next);
2251	}
2252}
2253
2254/*
2255 * print_load_map() is called from layout() if a load map is requested.
2256 */
2257static
2258void
2259print_load_map(void)
2260{
2261    unsigned long i;
2262    struct merged_segment *msg;
2263    struct merged_section *ms;
2264    struct common_symbol *common_symbol;
2265
2266	print("Load map for: %s\n", outputfile);
2267	print("Segment name     Section name     Address    Size\n");
2268	for(msg = merged_segments; msg ; msg = msg->next){
2269	    print("%-16.16s %-16.16s 0x%08x 0x%08x\n",
2270		   msg->sg.segname, "", (unsigned int)(msg->sg.vmaddr),
2271		   (unsigned int)(msg->sg.vmsize));
2272	    for(ms = msg->content_sections; ms ; ms = ms->next){
2273		print("%-16.16s %-16.16s 0x%08x 0x%08x",
2274		       ms->s.segname, ms->s.sectname,
2275		       (unsigned int)(ms->s.addr), (unsigned int)(ms->s.size));
2276		if(ms->contents_filename)
2277		    print(" from the file: %s\n", ms->contents_filename);
2278		else{
2279		    if(ms->order_load_maps){
2280			print("\n");
2281			for(i = 0; i < ms->norder_load_maps; i++){
2282			    if(dead_strip == FALSE ||
2283			       ms->order_load_maps[i].load_order->
2284			       fine_reloc->live == TRUE){
2285				print("\t\t\t\t  0x%08x 0x%08x ",
2286				  (unsigned int)(fine_reloc_output_offset(
2287				    ms->order_load_maps[i].section_map,
2288				    ms->order_load_maps[i].value -
2289				    ms->order_load_maps[i].section_map->
2290					s->addr) +
2291				    ms->order_load_maps[i].section_map->
2292					output_section->s.addr),
2293				    (unsigned int)
2294					(ms->order_load_maps[i].size));
2295			    }
2296			    else{
2297				print("\t\t\t\t  (dead stripped)       ");
2298			    }
2299			    if(ms->order_load_maps[i].archive_name != NULL)
2300				print("%s:",
2301					   ms->order_load_maps[i].archive_name);
2302			    if(ms->order_load_maps[i].symbol_name != NULL)
2303				print("%s:%s\n",
2304				      ms->order_load_maps[i].object_name,
2305				      ms->order_load_maps[i].symbol_name);
2306			    else
2307				print("%s\n",
2308				      ms->order_load_maps[i].object_name);
2309			}
2310		    }
2311		    else{
2312			print("\n");
2313			print_load_map_for_objects(ms);
2314		    }
2315		}
2316	    }
2317	    for(ms = msg->zerofill_sections; ms ; ms = ms->next){
2318		print("%-16.16s %-16.16s 0x%08x 0x%08x\n",
2319		       ms->s.segname, ms->s.sectname,
2320		       (unsigned int)(ms->s.addr), (unsigned int)(ms->s.size));
2321		if(ms->order_load_maps){
2322		    for(i = 0; i < ms->norder_load_maps; i++){
2323			if(dead_strip == FALSE ||
2324			   ms->order_load_maps[i].load_order->
2325			   fine_reloc->live == TRUE){
2326			    print("\t\t\t\t  0x%08x 0x%08x ",
2327				(unsigned int)(fine_reloc_output_offset(
2328				    ms->order_load_maps[i].section_map,
2329				    ms->order_load_maps[i].value -
2330				    ms->order_load_maps[i].section_map->
2331					s->addr) +
2332				    ms->order_load_maps[i].section_map->
2333					output_section->s.addr),
2334				    (unsigned int)
2335					(ms->order_load_maps[i].size));
2336			}
2337			else{
2338			    print("\t\t\t\t  (dead stripped)       ");
2339			}
2340
2341/* old
2342			print("\t\t\t\t  0x%08x 0x%08x ",
2343			    (unsigned int)(fine_reloc_output_offset(
2344				ms->order_load_maps[i].section_map,
2345				ms->order_load_maps[i].value -
2346				ms->order_load_maps[i].section_map->s->addr) +
2347				ms->order_load_maps[i].section_map->
2348							output_section->s.addr),
2349				(unsigned int)(ms->order_load_maps[i].size));
2350*/
2351			if(ms->order_load_maps[i].archive_name != NULL)
2352			    print("%s:", ms->order_load_maps[i].archive_name);
2353			print("%s:%s\n",
2354			      ms->order_load_maps[i].object_name,
2355			      ms->order_load_maps[i].symbol_name);
2356		    }
2357		}
2358		else{
2359		    print_load_map_for_objects(ms);
2360		    if(common_load_map.common_ms == ms){
2361			common_symbol = common_load_map.common_symbols;
2362			for(i = 0; i < common_load_map.ncommon_symbols; i++){
2363			    print("\t\t\t\t  0x%08x 0x%08x symbol: %s\n",
2364			       (unsigned int)
2365				(common_symbol->merged_symbol->nlist.n_value),
2366			       (unsigned int)(common_symbol->common_size),
2367			       common_symbol->merged_symbol->nlist.n_un.n_name);
2368			    common_symbol++;
2369			}
2370			common_load_map.common_ms = NULL;
2371			common_load_map.ncommon_symbols = 0;
2372			free(common_load_map.common_symbols);
2373		    }
2374		}
2375	    }
2376	    if(msg->next != NULL)
2377		print("\n");
2378	}
2379
2380	if(base_obj){
2381	    print("\nLoad map for base file: %s\n", base_obj->file_name);
2382	    print("Segment name     Section name     Address    Size\n");
2383	    for(msg = base_obj_segments; msg ; msg = msg->next){
2384		print("%-16.16s %-16.16s 0x%08x 0x%08x\n",
2385		      msg->sg.segname, "", (unsigned int)(msg->sg.vmaddr),
2386		      (unsigned int)(msg->sg.vmsize));
2387	    }
2388	}
2389
2390	if(fvmlib_segments != NULL){
2391	    print("\nLoad map for fixed VM shared libraries\n");
2392	    print("Segment name     Section name     Address    Size\n");
2393	    for(msg = fvmlib_segments; msg ; msg = msg->next){
2394		print("%-16.16s %-16.16s 0x%08x 0x%08x %s\n",
2395		      msg->sg.segname, "", (unsigned int)(msg->sg.vmaddr),
2396		      (unsigned int)(msg->sg.vmsize), msg->filename);
2397	    }
2398	}
2399}
2400
2401/*
2402 * print_load_map_for_objects() prints the load map for each object that has
2403 * a non-zero size in the specified merge section.
2404 */
2405static
2406void
2407print_load_map_for_objects(
2408struct merged_section *ms)
2409{
2410    unsigned long i, j, k;
2411    struct object_list *object_list, **p;
2412    struct object_file *object_file;
2413    struct fine_reloc *fine_relocs;
2414
2415	for(p = &objects; *p; p = &(object_list->next)){
2416	    object_list = *p;
2417	    for(i = 0; i < object_list->used; i++){
2418		object_file = &(object_list->object_files[i]);
2419		if(object_file == base_obj)
2420		    continue;
2421		if(object_file->dylib)
2422		    continue;
2423		if(object_file->bundle_loader)
2424		    continue;
2425		if(object_file->dylinker)
2426		    continue;
2427		for(j = 0; j < object_file->nsection_maps; j++){
2428		    if(object_file->section_maps[j].output_section == ms &&
2429		       object_file->section_maps[j].s->size != 0){
2430
2431		        if(object_file->section_maps[j].nfine_relocs != 0){
2432			    fine_relocs =
2433				object_file->section_maps[j].fine_relocs;
2434			    for(k = 0;
2435				k < object_file->section_maps[j].nfine_relocs;
2436				k++){
2437				print("  (input address 0x%08x) ",
2438				      (unsigned int)
2439				      (object_file->section_maps[j].s->addr +
2440					fine_relocs[k].input_offset));
2441				if((object_file->section_maps[j].s->flags &
2442				    SECTION_TYPE) == S_SYMBOL_STUBS ||
2443				   (object_file->section_maps[j].s->flags &
2444				    SECTION_TYPE) == S_LAZY_SYMBOL_POINTERS ||
2445				   (object_file->section_maps[j].s->flags &
2446				    SECTION_TYPE) ==
2447				    S_NON_LAZY_SYMBOL_POINTERS ||
2448				   (object_file->section_maps[j].s->flags &
2449				    SECTION_TYPE) == S_COALESCED){
2450				    if(fine_relocs[k].use_contents == FALSE)
2451					print("(eliminated)    ");
2452				    else if(dead_strip == TRUE &&
2453				       fine_relocs[k].live == FALSE)
2454					print("(dead stripped) ");
2455				    else
2456					print("     0x%08x ",
2457					      (unsigned int)(ms->s.addr +
2458						fine_relocs[k].output_offset));
2459
2460				}
2461				else if(dead_strip == TRUE &&
2462				   fine_relocs[k].live == FALSE)
2463				    print("(dead stripped) ");
2464				else
2465				    print("     0x%08x ",
2466					  (unsigned int)(ms->s.addr +
2467					    fine_relocs[k].output_offset));
2468				print("0x%08x ",
2469				      (unsigned int)
2470				      (k == (unsigned int)
2471					((object_file->section_maps[j].
2472							    nfine_relocs) -
2473					(unsigned int)1) ?
2474				      (unsigned int)
2475				      (object_file->section_maps[j].s->size) -
2476					(unsigned int)(fine_relocs[k].
2477						input_offset) :
2478				      (unsigned int)(fine_relocs[k + 1].
2479					input_offset) -
2480					(unsigned int)(fine_relocs[k].
2481					input_offset)));
2482				print_obj_name(object_file);
2483				print("\n");
2484			    }
2485			}
2486			else{
2487			    print("\t\t\t\t  0x%08x 0x%08x ",
2488				   (unsigned int)(ms->s.addr +
2489				   object_file->section_maps[j].offset),
2490				   (unsigned int)
2491					(object_file->section_maps[j].s->size));
2492			    print_obj_name(object_file);
2493			    print("\n");
2494			}
2495		    }
2496		}
2497	    }
2498	}
2499}
2500#endif /* !defined(RLD) */
2501
2502#ifdef DEBUG
2503/*
2504 * print_mach_header() prints the output file's mach header.  For debugging.
2505 */
2506__private_extern__
2507void
2508print_mach_header(void)
2509{
2510	print("Mach header for output file\n");
2511	print("    magic = 0x%x\n", (unsigned int)(output_mach_header.magic));
2512	print("    cputype = %d\n", output_mach_header.cputype);
2513	print("    cpusubtype = %d\n", output_mach_header.cpusubtype);
2514	print("    filetype = %u\n", output_mach_header.filetype);
2515	print("    ncmds = %u\n", output_mach_header.ncmds);
2516	print("    sizeofcmds = %u\n", output_mach_header.sizeofcmds);
2517	print("    flags = %u\n", output_mach_header.flags);
2518}
2519
2520/*
2521 * print_symtab_info() prints the output file's symtab command.  For
2522 * debugging.
2523 */
2524__private_extern__
2525void
2526print_symtab_info(void)
2527{
2528	print("Symtab info for output file\n");
2529	print("    cmd = %u\n", output_symtab_info.symtab_command.cmd);
2530	print("    cmdsize = %u\n", output_symtab_info.symtab_command.cmdsize);
2531	print("    nsyms = %u\n", output_symtab_info.symtab_command.nsyms);
2532	print("    symoff = %u\n", output_symtab_info.symtab_command.symoff);
2533	print("    strsize = %u\n", output_symtab_info.symtab_command.strsize);
2534	print("    stroff = %u\n", output_symtab_info.symtab_command.stroff);
2535}
2536
2537/*
2538 * print_thread_info() prints the output file's thread information.  For
2539 * debugging.
2540 */
2541__private_extern__
2542void
2543print_thread_info(void)
2544{
2545	print("Thread info for output file\n");
2546	print("    flavor = %lu\n", output_thread_info.flavor);
2547	print("    count = %lu\n", output_thread_info.count);
2548	print("    entry_point = 0x%x",
2549	      (unsigned int)(output_thread_info.entry_point));
2550	if(output_thread_info.entry_point != NULL)
2551	    print(" (0x%x)\n", (unsigned int)(*output_thread_info.entry_point));
2552	else
2553	    print("\n");
2554}
2555#endif /* DEBUG */
2556