/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 145819 2005-05-03 10:51:38Z jeff $");

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

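/*
 * Index into e_ident[] at which FreeBSD 3.x stored its traditional string
 * brand; consulted by __elfN(get_brandinfo)() below for compatibility with
 * binaries predating the EI_OSABI field.
 */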
#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

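/*
 * Brand used when nothing else matches.  With __ELF_WORD_SIZE expanded, the
 * tunable below is named e.g. "kern.elf32.fallback_brand" or
 * "kern.elf64.fallback_brand", so it can be preset from loader.conf(5) or
 * adjusted at runtime via sysctl(8).
 */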
#ifdef __arm__
int __elfN(fallback_brand) = 9;
#else
int __elfN(fallback_brand) = -1;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

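/*
 * Table of registered ABIs.  Slots are claimed and released by
 * __elfN(insert_brand_entry)() and __elfN(remove_brand_entry)(), and are
 * searched linearly by the lookup routines below.
 */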
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, and (3) the path named by the
	 * `interp_path' field.  We should also look for a ".note.ABI-tag"
	 * ELF section, now present in all Linux ELF binaries, FreeBSD 4.1+,
	 * and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
	vm_prot_t max)
{
	int error, rv;
	vm_offset_t off;
	vm_offset_t data_buf = 0;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
	    max, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
				 object,
				 trunc_page(offset),
				 &data_buf,
				 PAGE_SIZE,
				 TRUE,
				 VM_PROT_READ,
				 VM_PROT_ALL,
				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (rv);
		}

		off = offset - trunc_page(offset);
		error = copyout((caddr_t)data_buf + off, (caddr_t)start,
		    end - start);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	vm_offset_t start, vm_offset_t end, vm_prot_t prot,
	vm_prot_t max, int cow)
{
	vm_offset_t data_buf, off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot, max);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot, max);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned. This means we have
			 * to copy the data. Sigh.
			 */
			rv = vm_map_find(map, 0, 0, &start, end - start,
			    FALSE, prot, max, 0);
			if (rv)
				return (rv);
			data_buf = 0;
			while (start < end) {
				vm_object_reference(object);
				rv = vm_map_find(exec_map,
						 object,
						 trunc_page(offset),
						 &data_buf,
						 2 * PAGE_SIZE,
						 TRUE,
						 VM_PROT_READ,
						 VM_PROT_ALL,
						 (MAP_COPY_ON_WRITE
						  | MAP_PREFAULT_PARTIAL));
				if (rv != KERN_SUCCESS) {
					vm_object_deallocate(object);
					return (rv);
				}
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE)
					sz = PAGE_SIZE;
				error = copyout((caddr_t)data_buf + off,
				    (caddr_t)start, sz);
				vm_map_remove(exec_map, data_buf,
				    data_buf + 2 * PAGE_SIZE);
				if (error) {
					return (KERN_FAILURE);
				}
				start += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, max, cow);
			vm_map_unlock(map);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
	struct vnode *vp, vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

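/*
 * Page-rounding helpers for a caller-supplied page size `ps', which is
 * assumed to be a power of two.  For example, with ps = 0x1000,
 * trunc_page_ps(0x1234, ps) == 0x1000 and round_page_ps(0x1234, ps) ==
 * 0x2000.
 */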
#define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
				      object,
				      file_addr,	/* file offset */
				      map_addr,		/* virtual start */
				      map_addr + map_len,/* virtual end */
				      prot,
				      VM_PROT_ALL,
				      cow);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		vm_object_reference(object);
		rv = vm_map_find(exec_map,
				 object,
				 trunc_page(offset + filsz),
				 &data_buf,
				 PAGE_SIZE,
				 TRUE,
				 VM_PROT_READ,
				 VM_PROT_ALL,
				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
		    copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (error);
		}
	}

	/*
	 * Set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (error);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	/* XXXKSE */
	NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);

	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_vflag |= VV_TEXT;

	imgp->object = nd->ni_vp->v_object;
	vm_object_reference(imgp->object);

	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/*
	 * Only support headers that fit within the first page for now
	 * (multiplication of two Elf_Half fields will not overflow).
	 */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(p, vmspace,
			    nd->ni_vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->object)
		vm_object_deallocate(imgp->object);

	if (nd->ni_vp)
		vrele(nd->ni_vp);

	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error = 0, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header?
	 */
	if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	VOP_UNLOCK(imgp->vp, 0, td);

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	exec_new_vmspace(imgp, sv);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(imgp->proc, vmspace,
			    imgp->vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				goto fail;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		error = ENOMEM;
		goto fail;
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL && brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		free(path, M_TEMP);
		if (error == 0)
			interp = NULL;
	}
	if (interp != NULL) {
		error = __elfN(load_file)(imgp->proc, interp, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			goto fail;
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

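/*
 * With __ELF_WORD_SIZE expanded, this selects the matching user-space store
 * primitive, i.e. suword32() or suword64().
 */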
#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

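/*
 * Finalize the stack for the new image: skip over the argument and
 * environment pointer vectors (argc + envc pointers plus their two NULL
 * terminators), append the ELF auxiliary vector after them, and store argc
 * in the word just below the vectors.
 */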
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

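/*
 * Per-map-entry callback invoked by each_writable_segment() for every
 * segment that will appear in the core file.
 */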
typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td)); /* XXXKSE */
}

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		prstatus_t status;
		prfpregset_t fpregset;
		prpsinfo_t psinfo;
	} *tempdata;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(prstatus_t);
			status->pr_gregsetsz = sizeof(gregset_t);
			status->pr_fpregsetsz = sizeof(fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

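/*
 * Emit (or, when dst is NULL, merely size) one ELF note record at *off.
 * The record uses the standard layout: an Elf_Note header (n_namesz,
 * n_descsz, n_type) followed by the NUL-terminated name and then the
 * descriptor, with the name and descriptor each padded to Elf_Size
 * alignment.
 */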
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
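
/*
 * EXEC_SET() (see sys/imgact.h) generates the module glue that hands this
 * execsw entry to exec_register() at load time (and exec_unregister() on
 * unload), after which the kernel tries this image activator for each
 * execve(2).
 */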
1305