/*	$NetBSD: exec_elf32.c,v 1.3 1995/09/16 00:28:08 thorpej Exp $	*/

/*
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>

#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/exec.h>

#ifdef COMPAT_LINUX
#include <compat/linux/linux_exec.h>
#endif

#ifdef COMPAT_SVR4
#include <compat/svr4/svr4_exec.h>
#endif

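/*
 * Optional emulation-specific probe functions; each entry gets a chance
 * to claim an ELF executable in exec_elf_makecmds() below.
 */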
int (*elf_probe_funcs[])() = {
#ifdef COMPAT_SVR4
	svr4_elf_probe,
#endif
#ifdef COMPAT_LINUX
	linux_elf_probe
#endif
};

static int elf_set_segment __P((struct exec_package *, u_long, u_long,
	int));
static int elf_read_from __P((struct proc *, struct vnode *, u_long,
	caddr_t, int));
static void elf_load_psection __P((struct exec_vmcmd_set *,
	struct vnode *, Elf32_Phdr *, u_long *, u_long *, int *));

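/*
 * ELF_ALIGN() truncates `a' down to a multiple of `b', which must be a
 * power of two.  Callers that need to advance to an aligned address add
 * `b' first; see elf_load_psection() and the program header mapping in
 * exec_elf_makecmds().
 */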
#define ELF_ALIGN(a, b) ((a) & ~((b) - 1))

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
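/*
 * The layout built below, at increasing addresses from `stack', is:
 *
 *	argc
 *	argv[0] .. argv[argc - 1], NULL
 *	envp[0] .. envp[envc - 1], NULL
 *	AuxInfo entries (AUX_phdr .. AUX_null), for dynamic binaries only
 *	the argument and environment strings themselves
 *
 * ps_argvstr and ps_envstr are pointed at the argv and envp arrays so
 * they can be found again later.
 */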
void *
elf_copyargs(pack, arginfo, stack, argp)
	struct exec_package *pack;
	struct ps_strings *arginfo;
	void *stack;
	void *argp;
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	int argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;
	AuxInfo *a;
	struct elf_args *ap;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return NULL;

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_emul->e_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return NULL;

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return NULL;

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return NULL;

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return NULL;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	a = (AuxInfo *) cpp;
	if ((ap = (struct elf_args *) pack->ep_emul_arg)) {

		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = NBPG;
		a++;

		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		free((char *) ap, M_TEMP);
	}
	return a;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error.
 *
 * XXX machine type needs to be moved to <machine/param.h> so
 * just one comparison can be done. Unfortunately, there is both
 * em_486 and em_386, so this would not work on the i386.
 */
int
elf_check_header(eh, type)
	Elf32_Ehdr *eh;
	int type;
{

	if (bcmp(eh->e_ident, Elf32_e_ident, Elf32_e_siz) != 0)
		return ENOEXEC;

	switch (eh->e_machine) {
	/* XXX */
#ifdef i386
	case Elf32_em_386:
	case Elf32_em_486:
#endif
#ifdef sparc
	case Elf32_em_sparc:
#endif
		break;

	default:
		return ENOEXEC;
	}

	if (eh->e_type != type)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
static void
elf_load_psection(vcset, vp, ph, addr, size, prot)
	struct exec_vmcmd_set *vcset;
	struct vnode *vp;
	Elf32_Phdr *ph;
	u_long *addr;
	u_long *size;
	int *prot;
{
	u_long uaddr, msize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF32_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_ALIGN(*addr + ph->p_align, ph->p_align);
			uaddr = ELF_ALIGN(ph->p_vaddr, ph->p_align);
		} else
			uaddr = ph->p_vaddr;
		diff = ph->p_vaddr - uaddr;
	} else {
		*addr = uaddr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_ALIGN(uaddr, ph->p_align);
		diff = uaddr - *addr;
	}
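	/*
	 * `diff' is how far the segment's start lies above the chosen
	 * alignment boundary; it is subtracted from the file offset and
	 * added to the sizes below, so the mapping begins on the boundary
	 * yet stays congruent with the file image.
	 */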

	*prot |= (ph->p_flags & Elf32_pf_r) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & Elf32_pf_w) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & Elf32_pf_x) ? VM_PROT_EXECUTE : 0;

	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	NEW_VMCMD(vcset, vmcmd_map_readvn, *size, *addr, vp, offset, *prot);

	/*
	 * Check if the in-memory size exceeds the file-backed size; if so,
	 * map the remainder (the bss) as zero-filled pages.
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0, *prot);
		*size = msize;
	}
}

/*
 * elf_set_segment():
 *
 * Decide if the segment is text or data, depending on the protection
 * and set it appropriately
 */
static int
elf_set_segment(epp, vaddr, size, prot)
	struct exec_package *epp;
	u_long vaddr;
	u_long size;
	int prot;
{
	/*
	 * Kludge: Unfortunately the current implementation of the exec
	 * package assumes a single text and a single data segment.
	 * In Elf we can have more, but here we limit ourselves
	 * to two and hope :-(
	 * We also assume that the text is r-x, and data is rwx or rw-.
	 */
	switch (prot) {
	case (VM_PROT_READ | VM_PROT_EXECUTE):
		if (epp->ep_tsize != ELF32_NO_ADDR)
			return ENOEXEC;
		epp->ep_taddr = vaddr;
		epp->ep_tsize = size;
		break;

	case (VM_PROT_READ | VM_PROT_WRITE):
	case (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE):
		if (epp->ep_dsize != ELF32_NO_ADDR)
			return ENOEXEC;
		epp->ep_daddr = vaddr;
		epp->ep_dsize = size;
		break;

	default:
		return ENOEXEC;
	}
	return 0;
}

/*
 * elf_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
static int
elf_read_from(p, vp, off, buf, size)
	struct vnode *vp;
	u_long off;
	struct proc *p;
	caddr_t buf;
	int size;
{
	int error;
	int resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size,
			     off, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred,
			     &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it; a short read means a truncated or
	 * otherwise bogus file.
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
int
elf_load_file(p, path, vcset, entry, ap, last)
	struct proc *p;
	char *path;
	struct exec_vmcmd_set *vcset;
	u_long *entry;
	struct elf_args	*ap;
	u_long *last;
{
	int error, i;
	struct nameidata nd;
	Elf32_Ehdr eh;
	Elf32_Phdr *ph = NULL;
	u_long phsize;
	char *bp = NULL;
	u_long addr = *last;

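	/*
	 * `*last' supplies the first free address to load at; it is
	 * advanced past everything mapped here and handed back, so the
	 * caller can place the program headers right after the
	 * interpreter's image.
	 */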
	bp = path;
	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0) {
		return error;
	}
	if ((error = elf_read_from(p, nd.ni_vp, 0, (caddr_t) &eh,
				    sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, Elf32_et_dyn)) != 0)
		goto bad;

	phsize = eh.e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		u_long size = 0;
		int prot = 0;

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			elf_load_psection(vcset, nd.ni_vp, &ph[i], &addr,
						&size, &prot);
			/*
			 * Assume that the non-writable segment is the text;
			 * relocate the entry point relative to it and record
			 * its base address (passed later as AUX_base).
			 */
			if ((prot & VM_PROT_WRITE) == 0) {
				*entry = addr + eh.e_entry;
				ap->arg_interp = addr;
			}
			addr += size;
			break;

		case Elf32_pt_dynamic:
		case Elf32_pt_phdr:
		case Elf32_pt_note:
			break;

		default:
			break;
		}
	}

bad:
	if (ph != NULL)
		free((char *) ph, M_TEMP);

	*last = addr;
	vrele(nd.ni_vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 *
 * XXX no demand paging (yet?)
 */
int
exec_elf_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	Elf32_Ehdr *eh = epp->ep_hdr;
	Elf32_Phdr *ph, *pp;
	int error, i, n;
	char interp[MAXPATHLEN];
	u_long pos = 0, phsize;

	if (epp->ep_hdrvalid < sizeof(Elf32_Ehdr))
		return ENOEXEC;

	if (elf_check_header(eh, Elf32_et_exec))
		return ENOEXEC;

	/*
	 * Check if the vnode is open for writing, because we want to
	 * demand-page out of it.  If it is, refuse to exec it and
	 * return ETXTBSY.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0\n");
#endif
		return ETXTBSY;
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf32_Phdr);
	ph = (Elf32_Phdr *) malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
				    (caddr_t) ph, phsize)) != 0)
		goto bad;

	epp->ep_tsize = ELF32_NO_ADDR;
	epp->ep_dsize = ELF32_NO_ADDR;

	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == Elf32_pt_interp) {
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = elf_read_from(p, epp->ep_vp, pp->p_offset,
				      (caddr_t) interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable. This currently only
	 * applies to Linux and SVR4 on the i386.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	if ((n = sizeof elf_probe_funcs / sizeof elf_probe_funcs[0])) {
		error = ENOEXEC;
		for (i = 0; i < n && error; i++)
			error = elf_probe_funcs[i](p, epp, interp, &pos);

		if (error)
			goto bad;
	}

	/*
	 * Load all the necessary sections
	 */
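	/*
	 * Each loadable section is passed ELF32_NO_ADDR here, so
	 * elf_load_psection() maps it at its own p_vaddr; the interpreter
	 * mapped by elf_load_file() below is instead placed relative to
	 * the address handed in through its `last' argument.
	 */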
	for (i = 0; i < eh->e_phnum; i++) {
		u_long  addr = ELF32_NO_ADDR, size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case Elf32_pt_load:
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
				&ph[i], &addr, &size, &prot);
			if ((error = elf_set_segment(epp, addr, size,
						      prot)) != 0)
				goto bad;
			break;

		case Elf32_pt_shlib:
			error = ENOEXEC;
			goto bad;

		case Elf32_pt_interp:
			/* Already did this one */
		case Elf32_pt_dynamic:
		case Elf32_pt_phdr:
		case Elf32_pt_note:
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp[0]) {
		struct elf_args *ap;

		ap = (struct elf_args *) malloc(sizeof(struct elf_args),
						 M_TEMP, M_WAITOK);
		if ((error = elf_load_file(p, interp, &epp->ep_vmcmds,
				&epp->ep_entry, ap, &pos)) != 0) {
			free((char *) ap, M_TEMP);
			goto bad;
		}
		/* Arrange to load the program headers. */
		pos = ELF_ALIGN(pos + NBPG, NBPG);
		ap->arg_phaddr = pos;
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, phsize,
			  pos, epp->ep_vp, eh->e_phoff,
			  VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
		pos += phsize;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

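		/*
		 * These values reach the run-time linker through the
		 * AuxInfo vector built by elf_copyargs(), which also
		 * frees this elf_args structure once it has consumed it.
		 */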
		epp->ep_emul_arg = ap;
	} else
		epp->ep_entry = eh->e_entry;

	free((char *) ph, M_TEMP);
	epp->ep_vp->v_flag |= VTEXT;
	return exec_aout_setup_stack(p, epp);

bad:
	free((char *) ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}