1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *	Copyright (c) 1988 AT&T
29 *	All Rights Reserved
30 */
31
32#pragma ident	"@(#)begin.c	1.18	08/05/31 SMI"
33
34#include <ar.h>
35#include <stdlib.h>
36#include <memory.h>
37#include <errno.h>
38#include <libelf.h>
39#include <sys/mman.h>
40#include "decl.h"
41#include "member.h"
42#include "msg.h"
43
44static const char	armag[] = ARMAG;
45
46#include <crt_externs.h>
47#include <mach/mach.h>
48#include <mach-o/loader.h>
49#include <mach-o/dyld.h>
50#include <mach-o/fat.h>
51#include <sys/sysctl.h>
52
53void
54__swap_mach_header(struct mach_header* header)
55{
56	SWAP32(header->magic);
57	SWAP32(header->cputype);
58	SWAP32(header->cpusubtype);
59	SWAP32(header->filetype);
60	SWAP32(header->ncmds);
61	SWAP32(header->sizeofcmds);
62	SWAP32(header->flags);
63}
64
65void
66__swap_mach_header_64(struct mach_header_64* header)
67{
68	SWAP32(header->magic);
69	SWAP32(header->cputype);
70	SWAP32(header->cpusubtype);
71	SWAP32(header->filetype);
72	SWAP32(header->ncmds);
73	SWAP32(header->sizeofcmds);
74	SWAP32(header->flags);
75}
76
77void
78__swap_segment_command(struct segment_command* segment)
79{
80	SWAP32(segment->cmd);
81	SWAP32(segment->cmdsize);
82	SWAP32(segment->vmaddr);
83	SWAP32(segment->vmsize);
84	SWAP32(segment->fileoff);
85	SWAP32(segment->filesize);
86	SWAP32(segment->maxprot);
87	SWAP32(segment->initprot);
88	SWAP32(segment->nsects);
89	SWAP32(segment->flags);
90}
91
92void
93__swap_segment_command_64(struct segment_command_64* segment)
94{
95	SWAP32(segment->cmd);
96	SWAP32(segment->cmdsize);
97	SWAP64(segment->vmaddr);
98	SWAP64(segment->vmsize);
99	SWAP64(segment->fileoff);
100	SWAP64(segment->filesize);
101	SWAP32(segment->maxprot);
102	SWAP32(segment->initprot);
103	SWAP32(segment->nsects);
104	SWAP32(segment->flags);
105}
106
107void
108__swap_section(struct section* section_ptr)
109{
110	SWAP32(section_ptr->addr);
111	SWAP32(section_ptr->size);
112	SWAP32(section_ptr->offset);
113	SWAP32(section_ptr->align);
114	SWAP32(section_ptr->reloff);
115	SWAP32(section_ptr->nreloc);
116	SWAP32(section_ptr->flags);
117	SWAP32(section_ptr->reserved1);
118	SWAP32(section_ptr->reserved2);
119}
120
121void
122__swap_section_64(struct section_64* section_ptr)
123{
124	SWAP64(section_ptr->addr);
125	SWAP64(section_ptr->size);
126	SWAP32(section_ptr->offset);
127	SWAP32(section_ptr->align);
128	SWAP32(section_ptr->reloff);
129	SWAP32(section_ptr->nreloc);
130	SWAP32(section_ptr->flags);
131	SWAP32(section_ptr->reserved1);
132	SWAP32(section_ptr->reserved2);
133}
134
135void __swap_symtab_command(struct symtab_command *symtab)
136{
137	SWAP32(symtab->cmd);
138	SWAP32(symtab->cmdsize);
139	SWAP32(symtab->symoff);
140	SWAP32(symtab->nsyms);
141	SWAP32(symtab->stroff);
142	SWAP32(symtab->strsize);
143}
144
145static cpu_type_t current_program_arch(void)
146{
147        cpu_type_t current_arch = (_NSGetMachExecuteHeader())->cputype;
148        return current_arch;
149}
150
151static cpu_type_t current_kernel_arch(void)
152{
153        struct host_basic_info  hi;
154        unsigned int            size;
155        kern_return_t           kret;
156        cpu_type_t                                current_arch;
157        int                                                ret, mib[4];
158        size_t                                        len;
159        struct kinfo_proc                kp;
160
161        size = sizeof(hi)/sizeof(int);
162        kret = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hi, &size);
163        if (kret != KERN_SUCCESS) {
164                return 0;
165        }
166        current_arch = hi.cpu_type;
167        /* Now determine if the kernel is running in 64-bit mode */
168        mib[0] = CTL_KERN;
169        mib[1] = KERN_PROC;
170        mib[2] = KERN_PROC_PID;
171        mib[3] = 0; /* kernproc, pid 0 */
172        len = sizeof(kp);
173        ret = sysctl(mib, sizeof(mib)/sizeof(mib[0]), &kp, &len, NULL, 0);
174        if (ret == -1) {
175                return 0;
176        }
177        if (kp.kp_proc.p_flag & P_LP64) {
178                current_arch |= CPU_ARCH_ABI64;
179        }
180        return current_arch;
181}
182
183/*
184 * Initialize archive member
185 */
186Elf *
187_elf_member(int fd, Elf * ref, unsigned flags)
188{
189	register Elf	*elf;
190	Member		*mh;
191	size_t		base;
192
193	if (ref->ed_nextoff >= ref->ed_fsz)
194		return (0);
195	if (ref->ed_fd == -1)		/* disabled */
196		fd = -1;
197	if (flags & EDF_WRITE) {
198		_elf_seterr(EREQ_ARRDWR, 0);
199		return (0);
200	}
201	if (ref->ed_fd != fd) {
202		_elf_seterr(EREQ_ARMEMFD, 0);
203		return (0);
204	}
205	if ((_elf_vm(ref, ref->ed_nextoff, sizeof (struct ar_hdr)) !=
206	    OK_YES) || ((mh = _elf_armem(ref,
207	    ref->ed_ident + ref->ed_nextoff, ref->ed_fsz)) == 0))
208		return (0);
209
210	base = ref->ed_nextoff + sizeof (struct ar_hdr);
211	if (ref->ed_fsz - base < mh->m_hdr.ar_size) {
212		_elf_seterr(EFMT_ARMEMSZ, 0);
213		return (0);
214	}
215	if ((elf = (Elf *)calloc(1, sizeof (Elf))) == 0) {
216		_elf_seterr(EMEM_ELF, errno);
217		return (0);
218	}
219	++ref->ed_activ;
220	elf->ed_parent = ref;
221	elf->ed_fd = fd;
222	elf->ed_myflags |= flags;
223	elf->ed_armem = mh;
224	elf->ed_fsz = mh->m_hdr.ar_size;
225	elf->ed_baseoff = ref->ed_baseoff + base;
226	elf->ed_memoff = base - mh->m_slide;
227	elf->ed_siboff = base + elf->ed_fsz + (elf->ed_fsz & 1);
228	ref->ed_nextoff = elf->ed_siboff;
229	elf->ed_image = ref->ed_image;
230	elf->ed_imagesz = ref->ed_imagesz;
231	elf->ed_vm = ref->ed_vm;
232	elf->ed_vmsz = ref->ed_vmsz;
233	elf->ed_ident = ref->ed_ident + base - mh->m_slide;
234
235	/*
236	 * If this member is the archive string table,
237	 * we've already altered the bytes.
238	 */
239
240	if (ref->ed_arstroff == ref->ed_nextoff)
241		elf->ed_status = ES_COOKED;
242	return (elf);
243}
244
245
246Elf *
247_elf_regular(int fd, unsigned flags)		/* initialize regular file */
248{
249	Elf		*elf;
250
251	if ((elf = (Elf *)calloc(1, sizeof (Elf))) == 0) {
252		_elf_seterr(EMEM_ELF, errno);
253		return (0);
254	}
255
256	NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*elf))
257	elf->ed_fd = fd;
258	elf->ed_myflags |= flags;
259	if (_elf_inmap(elf) != OK_YES) {
260		free(elf);
261		return (0);
262	}
263	NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*elf))
264	return (elf);
265}
266
267
/*
 * Examine the mapped image attached to "elf" and configure the
 * descriptor according to the file kind found: ELF, Mach-O (fat,
 * 32-bit thin, or 64-bit thin), ar archive, or unknown (ELF_K_NONE).
 * For Mach-O files a synthetic in-memory ELF header is fabricated so
 * the rest of libelf can treat the file uniformly.
 * Returns the configured descriptor, or 0 on error.
 */
Elf *
_elf_config(Elf * elf)
{
	char *		base;
	unsigned	encode;

	ELFRWLOCKINIT(&elf->ed_rwlock);

	/*
	 * Determine if this is a ELF file.
	 */
	base = elf->ed_ident;
	if ((elf->ed_fsz >= EI_NIDENT) &&
	    (_elf_vm(elf, (size_t)0, (size_t)EI_NIDENT) == OK_YES) &&
	    (base[EI_MAG0] == ELFMAG0) &&
	    (base[EI_MAG1] == ELFMAG1) &&
	    (base[EI_MAG2] == ELFMAG2) &&
	    (base[EI_MAG3] == ELFMAG3)) {
		elf->ed_kind = ELF_K_ELF;
		elf->ed_class = base[EI_CLASS];
		elf->ed_encode = base[EI_DATA];
		/* An EI_VERSION of zero is treated as version 1. */
		if ((elf->ed_version = base[EI_VERSION]) == 0)
			elf->ed_version = 1;
		elf->ed_identsz = EI_NIDENT;

		/*
		 * Allow writing only if originally specified read only.
		 * This is only necessary if the file must be translating
		 * from one encoding to another.
		 */
		ELFACCESSDATA(encode, _elf_encode)
		if ((elf->ed_vm == 0) && ((elf->ed_myflags & EDF_WRITE) == 0) &&
		    (elf->ed_encode != encode)) {
			if (mprotect((char *)elf->ed_image, elf->ed_imagesz,
			    PROT_READ|PROT_WRITE) == -1) {
				_elf_seterr(EIO_VM, errno);
				return (0);
			}
		}
		return (elf);
	}

	/*
	 * Determine if this is a Mach-o file.
	 * A fat (multi-architecture) file is first narrowed to the slice
	 * matching the current program's (or kernel's) architecture.
	 * Fat headers are always stored big-endian.
	 */
	if ((elf->ed_fsz >= sizeof(struct fat_header)) &&
	    (_elf_vm(elf, (size_t)0, (size_t)sizeof(struct fat_header)) == OK_YES) &&
	    (FAT_MAGIC == *(unsigned int *)(elf->ed_ident) ||
		 FAT_CIGAM == *(unsigned int *)(elf->ed_ident)))
	{
		struct fat_header *fat_header = (struct fat_header *)(elf->ed_ident);
		int nfat_arch = OSSwapBigToHostInt32(fat_header->nfat_arch);
		int end_of_archs = sizeof(struct fat_header) + nfat_arch * sizeof(struct fat_arch);
		struct fat_arch *arch = (struct fat_arch *)(elf->ed_ident + sizeof(struct fat_header));

		/* EDF_RDKERNTYPE selects the kernel's architecture instead of the program's. */
		cpu_type_t cputype = (elf->ed_myflags & EDF_RDKERNTYPE) ? current_kernel_arch() :current_program_arch();

		/* The fat_arch table must fit inside the file. */
		if (end_of_archs > elf->ed_fsz) {
			_elf_seterr(EIO_VM, errno);
			return 0;
		}

		/* Slide the descriptor's view forward to the matching slice. */
		for (; nfat_arch-- > 0; arch++) {
			if(((cpu_type_t)OSSwapBigToHostInt32(arch->cputype)) == cputype) {
				elf->ed_ident += OSSwapBigToHostInt32(arch->offset);
				elf->ed_image += OSSwapBigToHostInt32(arch->offset);
				elf->ed_fsz -= OSSwapBigToHostInt32(arch->offset);
				elf->ed_imagesz -= OSSwapBigToHostInt32(arch->offset);
				break;
			}
		}
		/* Fall through positioned at mach_header for "thin" architecture matching host endian-ness */
	}

	/*
	 * 32-bit thin Mach-O (either endianness).
	 */
	if ((elf->ed_fsz >= sizeof(struct mach_header)) &&
	    (_elf_vm(elf, (size_t)0, (size_t)sizeof(struct mach_header)) == OK_YES) &&
	    (MH_MAGIC == *(unsigned int *)(elf->ed_image) ||
		 MH_CIGAM == *(unsigned int *)(elf->ed_image))) {

		struct mach_header hdr, *mh = (struct mach_header *)elf->ed_image;
		struct load_command *thisLC = (struct load_command *)(&(mh[1]));
		int i, n = 0;
		int needSwap = (MH_CIGAM == mh->magic);

		/* Work on a swapped copy; the mapped image itself is left alone. */
		if (needSwap) {
			hdr = *mh;
			mh = &hdr;
			__swap_mach_header(mh);
		}

		/*
		 * Count the pseudo-sections this file will present:
		 * each LC_SEGMENT contributes its nsects, and LC_SYMTAB
		 * contributes 2 (symbol table + string table).
		 */
		for (i = 0; i < mh->ncmds; i++) {
			int cmd = thisLC->cmd, cmdsize = thisLC->cmdsize;

			if (needSwap) {
				SWAP32(cmd);
				SWAP32(cmdsize);
			}

			switch(cmd) {
				case LC_SEGMENT:
				{
					struct segment_command seg, *thisSG = (struct segment_command *)thisLC;

					if (needSwap) {
						seg = *thisSG;
						thisSG = &seg;
						__swap_segment_command(thisSG);
					}

					n += thisSG->nsects;
					break;
				}

				case LC_SYMTAB:
					n += 2;
					break;

				default:
					break;
			}
			thisLC = (struct load_command *) ((caddr_t) thisLC + cmdsize);
		}

		/*
		 * Fabricate a 32-bit ELF header describing the Mach-O file.
		 * Note: ed_ident now points at malloc'ed memory, no longer
		 * into the mapped image.
		 */
		if (0 == (elf->ed_ident = malloc(sizeof(Elf32_Ehdr)))) {
			_elf_seterr(EMEM_ELF, errno);
			return (0);
		}

		/* "Mach" pseudo-magic marks the header as synthesized. */
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG0] = 'M';
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG1] = 'a';
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG2] = 'c';
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG3] = 'h';
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_CLASS] = ELFCLASS32;
#if defined(__BIG_ENDIAN__)
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_DATA] = (needSwap ? ELFDATA2LSB : ELFDATA2MSB);
#else
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_DATA] = (needSwap ? ELFDATA2MSB : ELFDATA2LSB);
#endif
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_VERSION] = EV_CURRENT;
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_OSABI] = ELFOSABI_NONE;
		((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_ABIVERSION] = 0;
		((Elf32_Ehdr *)(elf->ed_ident))->e_type = ET_NONE;
		((Elf32_Ehdr *)(elf->ed_ident))->e_machine = EM_NONE;
		((Elf32_Ehdr *)(elf->ed_ident))->e_version = EV_CURRENT;
		((Elf32_Ehdr *)(elf->ed_ident))->e_phoff = 0;
		((Elf32_Ehdr *)(elf->ed_ident))->e_shoff = sizeof(struct mach_header);
		((Elf32_Ehdr *)(elf->ed_ident))->e_ehsize = sizeof(Elf32_Ehdr);
		((Elf32_Ehdr *)(elf->ed_ident))->e_phentsize = sizeof(Elf32_Phdr);
		((Elf32_Ehdr *)(elf->ed_ident))->e_phnum = 0;
		((Elf32_Ehdr *)(elf->ed_ident))->e_shentsize = sizeof(Elf32_Shdr);
		/* +1 for the customary null section at index 0. */
		((Elf32_Ehdr *)(elf->ed_ident))->e_shnum = n + 1;
		((Elf32_Ehdr *)(elf->ed_ident))->e_shstrndx = SHN_MACHO;

		elf->ed_kind = ELF_K_MACHO;
		elf->ed_class = ((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_CLASS];
#if defined(__BIG_ENDIAN__)
		elf->ed_encode = ELFDATA2MSB;
#else
		elf->ed_encode = ELFDATA2LSB;
#endif
		elf->ed_version = ((Elf32_Ehdr *)(elf->ed_ident))->e_ident[EI_VERSION];
		elf->ed_identsz = EI_NIDENT;

		/*
		 * Allow writing only if originally specified read only.
		 * This is only necessary if the file must be translating
		 * from one encoding to another.
		 */
 		ELFACCESSDATA(encode, _elf_encode)
		if ((elf->ed_vm == 0) && ((elf->ed_myflags & EDF_WRITE) == 0) &&
		    (elf->ed_encode != encode)) {
			if (mprotect((char *)elf->ed_image, elf->ed_imagesz,
			    PROT_READ|PROT_WRITE) == -1) {
				_elf_seterr(EIO_VM, errno);
				return (0);
			}
		}
		return (elf);
	}

	/*
	 * 64-bit thin Mach-O (either endianness); mirrors the 32-bit case.
	 */
	if ((elf->ed_fsz >= sizeof(struct mach_header_64)) &&
	    (_elf_vm(elf, (size_t)0, (size_t)sizeof(struct mach_header_64)) == OK_YES) &&
	    (MH_MAGIC_64 == *(unsigned int *)(elf->ed_image) ||
		 MH_CIGAM_64 == *(unsigned int *)(elf->ed_image))) {

		struct mach_header_64 hdr, *mh64 = (struct mach_header_64 *)elf->ed_image;
		struct load_command *thisLC = (struct load_command *)(&(mh64[1]));
		int i, n = 0;
		int needSwap = (MH_CIGAM_64 == mh64->magic);

		/* Work on a swapped copy; the mapped image itself is left alone. */
		if (needSwap) {
			hdr = *mh64;
			mh64 = &hdr;
			__swap_mach_header_64(mh64);
		}

		/*
		 * Count pseudo-sections: nsects per LC_SEGMENT_64 plus
		 * 2 for LC_SYMTAB (symbol table + string table).
		 */
		for (i = 0; i < mh64->ncmds; i++) {
			int cmd = thisLC->cmd, cmdsize = thisLC->cmdsize;

			if (needSwap) {
				SWAP32(cmd);
				SWAP32(cmdsize);
			}

			switch(cmd) {
				case LC_SEGMENT_64:
				{
					struct segment_command_64 seg, *thisSG64 = (struct segment_command_64 *)thisLC;

					if (needSwap) {
						seg = *thisSG64;
						thisSG64 = &seg;
						__swap_segment_command_64(thisSG64);
					}

					n += thisSG64->nsects;
					break;
				}

				case LC_SYMTAB:
					n += 2;
					break;

				default:
					break;
			}
			thisLC = (struct load_command *) ((caddr_t) thisLC + cmdsize);
		}

		/*
		 * Fabricate a 64-bit ELF header describing the Mach-O file.
		 * ed_ident now points at malloc'ed memory.
		 */
		if (0 == (elf->ed_ident = malloc(sizeof(Elf64_Ehdr)))) {
			_elf_seterr(EMEM_ELF, errno);
			return (0);
		}

		/* "Mach" pseudo-magic marks the header as synthesized. */
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG0] = 'M';
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG1] = 'a';
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG2] = 'c';
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_MAG3] = 'h';
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_CLASS] = ELFCLASS64;
#if defined(__BIG_ENDIAN__)
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_DATA] = (needSwap ? ELFDATA2LSB : ELFDATA2MSB);
#else
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_DATA] = (needSwap ? ELFDATA2MSB : ELFDATA2LSB);
#endif
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_VERSION] = EV_CURRENT;
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_OSABI] = ELFOSABI_NONE;
		((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_ABIVERSION] = 0;
		((Elf64_Ehdr *)(elf->ed_ident))->e_type = ET_NONE;
		((Elf64_Ehdr *)(elf->ed_ident))->e_machine = EM_NONE;
		((Elf64_Ehdr *)(elf->ed_ident))->e_version = EV_CURRENT;
		((Elf64_Ehdr *)(elf->ed_ident))->e_phoff = 0;
		((Elf64_Ehdr *)(elf->ed_ident))->e_shoff = sizeof(struct mach_header_64);
		((Elf64_Ehdr *)(elf->ed_ident))->e_ehsize = sizeof(Elf64_Ehdr);
		((Elf64_Ehdr *)(elf->ed_ident))->e_phentsize = sizeof(Elf64_Phdr);
		((Elf64_Ehdr *)(elf->ed_ident))->e_phnum = 0;
		((Elf64_Ehdr *)(elf->ed_ident))->e_shentsize = sizeof(Elf64_Shdr);
		/* +1 for the customary null section at index 0. */
		((Elf64_Ehdr *)(elf->ed_ident))->e_shnum = n + 1;
		((Elf64_Ehdr *)(elf->ed_ident))->e_shstrndx = SHN_MACHO_64;

		elf->ed_kind = ELF_K_MACHO;
		elf->ed_class = ((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_CLASS];
#if defined(__BIG_ENDIAN__)
		elf->ed_encode = ELFDATA2MSB;
#else
		elf->ed_encode = ELFDATA2LSB;
#endif
		elf->ed_version = ((Elf64_Ehdr *)(elf->ed_ident))->e_ident[EI_VERSION];
		elf->ed_identsz = EI_NIDENT;

		/*
		 * Allow writing only if originally specified read only.
		 * This is only necessary if the file must be translating
		 * from one encoding to another.
		 */
		ELFACCESSDATA(encode, _elf_encode)
		if ((elf->ed_vm == 0) && ((elf->ed_myflags & EDF_WRITE) == 0) &&
		    (elf->ed_encode != encode)) {
			if (mprotect((char *)elf->ed_image, elf->ed_imagesz,
			    PROT_READ|PROT_WRITE) == -1) {
				_elf_seterr(EIO_VM, errno);
				return (0);
			}
		}
		return (elf);
	}

	/*
	 * Determine if this is an Archive
	 */
	if ((elf->ed_fsz >= SARMAG) &&
	    (_elf_vm(elf, (size_t)0, (size_t)SARMAG) == OK_YES) &&
	    (memcmp(base, armag, SARMAG) == 0)) {
		_elf_arinit(elf);
		elf->ed_kind = ELF_K_AR;
		elf->ed_identsz = SARMAG;
		return (elf);
	}

	/*
	 *	Return a few ident bytes, but not so many that
	 *	getident() must read a large file.  512 is arbitrary.
	 */

	elf->ed_kind = ELF_K_NONE;
	if ((elf->ed_identsz = elf->ed_fsz) > 512)
		elf->ed_identsz = 512;

	return (elf);
}
577
/*
 * Create an Elf descriptor over a caller-supplied memory image of
 * size sz (no file descriptor involved).  The image is assumed to
 * start with an ELF ident; _elf_config() then re-examines it and
 * may reclassify the kind.  Returns 0 on error.
 */
Elf *
elf_memory(char * image, size_t sz)
{
	Elf		*elf;
	unsigned	work;

	/*
	 * Fail if elf_version() has not been called yet.
	 */
	ELFACCESSDATA(work, _elf_work)
	if (work == EV_NONE) {
		_elf_seterr(ESEQ_VER, 0);
		return (0);
	}

	if ((elf = (Elf *)calloc(1, sizeof (Elf))) == 0) {
		_elf_seterr(EMEM_ELF, errno);
		return (0);
	}
	NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*elf))
	elf->ed_fd = -1;	/* memory-backed: no file descriptor */
	elf->ed_myflags |= EDF_READ | EDF_MEMORY;
	elf->ed_image = elf->ed_ident = image;
	elf->ed_imagesz = elf->ed_fsz = elf->ed_identsz = sz;
	elf->ed_kind = ELF_K_ELF;
	elf->ed_class = image[EI_CLASS];
	elf->ed_encode = image[EI_DATA];
	/* An EI_VERSION of zero is treated as version 1. */
	if ((elf->ed_version = image[EI_VERSION]) == 0)
		elf->ed_version = 1;
	elf->ed_identsz = EI_NIDENT;
	elf->ed_activ = 1;
	elf = _elf_config(elf);
	NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*elf))
	return (elf);
}
613
614/*
615 * The following is a private interface between the linkers (ld & ld.so.1)
616 * and libelf.
617 *
618 * elf_begin(0, ELF_C_IMAGE, ref)
619 *	Return a new elf_descriptor which uses the memory image from
620 *	ref as the base image of the elf file.  Before this elf_begin()
621 *	is called an elf_update(ref, ELF_C_WRIMAGE) must have been
622 *	done to the ref elf descriptor.
 *	The ELF_C_IMAGE is unique in that modification of the Elf structure
624 *	is illegal (no elf_new*()) but you can modify the actual
625 *	data image of the file in question.
626 *
627 *	When you are done processing this file you can then perform a
628 *	elf_end() on it.
629 *
630 *	NOTE: if an elf_update(ref, ELF_C_WRITE) is done on the ref Elf
631 *		descriptor then the memory image that the ELF_C_IMAGE
632 *		is using has been discarded.  The proper calling convention
633 *		for this is as follows:
634 *
635 *	elf1 = elf_begin(fd, ELF_C_WRITE, 0);
636 *	...
637 *	elf_update(elf1, ELF_C_WRIMAGE);	 build memory image
638 *	elf2 = elf_begin(0, ELF_C_IMAGE, elf1);
639 *	...
640 *	elf_end(elf2);
 *	elf_update(elf1, ELF_C_WRITE);		flush memory image to disk
642 *	elf_end(elf1);
643 *
644 *
645 * elf_begin(0, ELF_C_IMAGE, 0);
646 *	returns a pointer to an elf descriptor as if it were opened
647 *	with ELF_C_WRITE except that it has no file descriptor and it
648 *	will not create a file.  It's to be used with the command:
649 *
650 *		elf_update(elf, ELF_C_WRIMAGE)
651 *
652 *	which will build a memory image instead of a file image.
653 *	The memory image is allocated via dynamic memory (malloc) and
 *	can be freed with a subsequent call to
655 *
656 *		elf_update(elf, ELF_C_WRITE)
657 *
658 *	NOTE: that if elf_end(elf) is called it will not free the
659 *		memory image if it is still allocated.  It is then
 *		the caller's responsibility to free it via a call
661 *		to free().
662 *
663 *	Here is a potential calling sequence for this interface:
664 *
665 *	elf1 = elf_begin(0, ELF_C_IMAGE, 0);
666 *	...
667 *	elf_update(elf1, ELF_C_WRIMAGE);	build memory image
668 *	elf2 = elf_begin(0, ELF_C_IMAGE, elf1);
669 *	...
670 *	image_ptr = elf32_getehdr(elf2);	get pointer to image
671 *	elf_end(elf2);
672 *	elf_end(elf1);
673 *	...
674 *	use image
675 *	...
676 *	free(image_ptr);
677 */
678
/*
 * Public entry point: obtain an Elf descriptor for fd according to
 * cmd.  A non-null ref either bumps the activation count of an
 * existing descriptor or yields the next member of an archive.
 * Returns 0 on error or for ELF_C_NULL.
 */
Elf *
elf_begin(int fd, Elf_Cmd cmd, Elf *ref)
{
	register Elf	*elf;
	unsigned	work;
	unsigned	flags = 0;

	ELFACCESSDATA(work, _elf_work)
	if (work == EV_NONE)	/* version() not called yet */
	{
		_elf_seterr(ESEQ_VER, 0);
		return (0);
	}
	switch (cmd) {
	default:
		_elf_seterr(EREQ_BEGIN, 0);
		return (0);

	case ELF_C_NULL:
		return (0);

	case ELF_C_IMAGE:
		/*
		 * With a ref, wrap the memory image previously built by
		 * elf_update(ref, ELF_C_WRIMAGE) in a new descriptor.
		 * Without a ref, fall through to the ELF_C_WRITE path.
		 */
		if (ref) {
			char *	image;
			size_t	imagesz;
			ELFRLOCK(ref);
			if ((image = ref->ed_wrimage) == 0) {
				_elf_seterr(EREQ_NOWRIMAGE, 0);
				ELFUNLOCK(ref);
				return (0);
			}
			imagesz = ref->ed_wrimagesz;
			ELFUNLOCK(ref);
			return (elf_memory(image, imagesz));
		}
		/* FALLTHROUGH */
	case ELF_C_WRITE:
		/* Fresh write-only descriptor; no input image is read. */
		if ((elf = (Elf *)calloc(1, sizeof (Elf))) == 0) {
			_elf_seterr(EMEM_ELF, errno);
			return (0);
		}
		NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*elf))
		ELFRWLOCKINIT(&elf->ed_rwlock);
		elf->ed_fd = fd;
		elf->ed_activ = 1;
		elf->ed_myflags |= EDF_WRITE;
		/* EDF_WRALLOC: the image is malloc'ed, not file-backed. */
		if (cmd == ELF_C_IMAGE)
			elf->ed_myflags |= EDF_WRALLOC;
		NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*elf))
		return (elf);
	case ELF_C_RDWR:
		flags = EDF_WRITE | EDF_READ;
		break;

	case ELF_C_READ:
		flags = EDF_READ;
		break;

	case ELF_C_RDKERNTYPE:
		/* Like ELF_C_READ, but fat files select the kernel's arch. */
		flags = EDF_READ | EDF_RDKERNTYPE;
		break;
	}

	/*
	 *	A null ref asks for a new file
	 *	Non-null ref bumps the activation count
	 *		or gets next archive member
	 */

	if (ref == 0) {
		if ((elf = _elf_regular(fd, flags)) == 0)
			return (0);
	} else {
		ELFWLOCK(ref);
		/* ref must already have every access mode requested now. */
		if ((ref->ed_myflags & flags) != flags) {
			_elf_seterr(EREQ_RDWR, 0);
			ELFUNLOCK(ref);
			return (0);
		}
		/*
		 * new activation ?
		 */
		if (ref->ed_kind != ELF_K_AR) {
			++ref->ed_activ;
			ELFUNLOCK(ref);
			return (ref);
		}
		if ((elf = _elf_member(fd, ref, flags)) == 0) {
			ELFUNLOCK(ref);
			return (0);
		}
		ELFUNLOCK(ref);
	}

	NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*elf))
	elf->ed_activ = 1;
	elf = _elf_config(elf);
	NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*elf))

	return (elf);
}
780