imgact_elf.c revision 196653
/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 196653 2009-08-30 14:38:17Z bz $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>
#endif

#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static boolean_t __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

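/*
 * The fallback brand is tried when no ABI note, EI_OSABI byte, old-style
 * brand string, or interpreter path matches.  It can be set with the
 * kern.elf<N>.fallback_brand tunable/sysctl below; the value is compared
 * against each brand's EI_OSABI number (e.g. 9, ELFOSABI_FREEBSD, for the
 * native FreeBSD brand).
 */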
int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

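/*
 * Variants of trunc_page()/round_page() that take an arbitrary page size
 * "ps" (assumed to be a power of two), since a brand's sv_pagesize may
 * differ from the native PAGE_SIZE.  aligned() checks that "a" is
 * naturally aligned for type "t".
 */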
#define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
#define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))

static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.hdr.n_type	= 1,
	.vendor		= FREEBSD_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= __elfN(freebsd_trans_osrel)
};

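/*
 * Translate the desc word of a FreeBSD ".note.ABI-tag" note into an osrel
 * value: skip the note header and the padded vendor name, then read the
 * 32-bit __FreeBSD_version that follows.
 */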
static boolean_t
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
	*osrel = *(const int32_t *)(p);

	return (TRUE);
}

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz	= 16,	/* XXX at least 16 */
	.hdr.n_type	= 1,
	.vendor		= GNU_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= kfreebsd_trans_osrel
};

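/*
 * Translate a GNU ".note.ABI-tag" note: the desc is four 32-bit words,
 * the first of which must identify GNU/kFreeBSD (3); the remaining three
 * carry the kernel version components combined below.
 */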
static boolean_t
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (FALSE);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (TRUE);
}

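/*
 * Register an ELF brand in the first free slot of elf_brand_list.
 * Returns 0 on success, -1 if the table is full.
 */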
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

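/*
 * Remove a previously registered brand.  Returns 0 on success, -1 if the
 * entry was not found.
 */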
int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

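/*
 * Report whether any process is still executing under the given brand,
 * i.e. whether some process's p_sysent points at the brand's sysvec.
 */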
int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi;
	boolean_t ret;
	int i;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for a ".note.ABI-tag" ELF section */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
			if (ret)
				return (bi);
		}
	}

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
				continue;
			if (hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

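/*
 * Sanity-check an ELF header: correct magic, class, byte order, version
 * and phentsize for this kernel, plus at least one registered brand for
 * the machine type.
 */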
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

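/*
 * Map the part of a segment that does not fill a whole page: allocate an
 * anonymous page covering the range and copy the file data into it,
 * rather than mapping the file object directly.
 */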
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

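/*
 * Insert [start, end) of the file object into the map.  Partial pages at
 * either end are handled by __elfN(map_partial)().  If the file offset is
 * not page aligned the data must be copied into anonymous memory instead
 * of being mapped with the caller's cow flags.
 */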
static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned. This means we have
			 * to copy the data. Sigh.
			 */
			rv = vm_map_find(map, NULL, 0, &start, end - start,
			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
			if (rv)
				return (rv);
			if (object == NULL)
				return (KERN_SUCCESS);
			for (; start < end; start += sz) {
				sf = vm_imgact_map_page(object, offset);
				if (sf == NULL)
					return (KERN_FAILURE);
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE - off)
					sz = PAGE_SIZE - off;
				error = copyout((caddr_t)sf_buf_kva(sf) + off,
				    (caddr_t)start, sz);
				vm_imgact_unmap_page(sf);
				if (error) {
					return (KERN_FAILURE);
				}
				offset += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_object_reference(object);
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, VM_PROT_ALL, cow);
			vm_map_unlock(map);
			if (rv != KERN_SUCCESS)
				vm_object_deallocate(object);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

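/*
 * Map one PT_LOAD segment into the process's address space: the
 * file-backed portion is mapped (or copied) first, any bss beyond filsz
 * is backed by anonymous memory, and the trailing file fragment is copied
 * into the first bss page.
 */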
static int
__elfN(load_section)(struct vmspace *vmspace,
	vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
				      object,
				      file_addr,	/* file offset */
				      map_addr,		/* virtual start */
				      map_addr + map_len,/* virtual end */
				      prot,
				      cow);
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (error);
		}
	}

	/*
	 * Set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int vfslocked, error, i, numsegs;

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
	    curthread);
	vfslocked = 0;
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	vfslocked = NDHASGIANT(nd);
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	nd->ni_vp->v_vflag |= VV_TEXT;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now      */
	/*    (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp)
		vput(nd->ni_vp);

	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);

	return (error);
}

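/*
 * The ELF image activator: validate the header, pick a brand, create a
 * fresh vmspace, map all PT_LOAD segments, load the interpreter if one is
 * requested, and build the auxargs consumed by the fixup routine.
 */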
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int32_t osrel = 0;
	int error = 0, i;
	const char *interp = NULL, *newinterp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header ?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr))
		return (ENOEXEC);
	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_INTERP) {
			/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
				return (ENOEXEC);
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		return (ENOEXEC);
	}
	if (hdr->e_type == ET_DYN &&
	    (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
		return (ENOEXEC);
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		newinterp = brand_info->interp_newpath;

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 */
	VOP_UNLOCK(imgp->vp, 0);

	error = exec_new_vmspace(imgp, sv);
	imgp->proc->p_sysent = sv;

	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		return (error);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				return (error);

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		return (ENOMEM);
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	if (interp != NULL) {
		int have_interp = FALSE;
		VOP_UNLOCK(imgp->vp, 0);
		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			snprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			free(path, M_TEMP);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp && newinterp != NULL) {
			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		}
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			return (error);
		}
	} else
		addr = 0;

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->proc->p_osrel = osrel;

	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

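/*
 * Finish setting up the new image's stack: write the ELF aux vector
 * (AT_PHDR, AT_ENTRY, AT_BASE, ...) above the argument and environment
 * pointers and push argc beneath them.
 */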
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

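/*
 * Write an ELF core file for the current process to vp: size the
 * dumpable segments, generate and write the header area, then write the
 * contents of every writable segment.  Fails with EFAULT if the dump
 * would exceed the core file size limit.
 */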
987__elfN(coredump)(td, vp, limit)
988	struct thread *td;
989	struct vnode *vp;
990	off_t limit;
991{
992	struct ucred *cred = td->td_ucred;
993	int error = 0;
994	struct sseg_closure seginfo;
995	void *hdr;
996	size_t hdrsize;
997
998	/* Size the program segments. */
999	seginfo.count = 0;
1000	seginfo.size = 0;
1001	each_writable_segment(td, cb_size_segment, &seginfo);
1002
1003	/*
1004	 * Calculate the size of the core file header area by making
1005	 * a dry run of generating it.  Nothing is written, but the
1006	 * size is calculated.
1007	 */
1008	hdrsize = 0;
1009	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
1010
1011	if (hdrsize + seginfo.size >= limit)
1012		return (EFAULT);
1013
1014	/*
1015	 * Allocate memory for building the header, fill it up,
1016	 * and write it out.
1017	 */
1018	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1019	if (hdr == NULL) {
1020		return (EINVAL);
1021	}
1022	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
1023
1024	/* Write the contents of all of the writable segments. */
1025	if (error == 0) {
1026		Elf_Phdr *php;
1027		off_t offset;
1028		int i;
1029
1030		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1031		offset = hdrsize;
1032		for (i = 0; i < seginfo.count; i++) {
1033			error = vn_rdwr_inchunks(UIO_WRITE, vp,
1034			    (caddr_t)(uintptr_t)php->p_vaddr,
1035			    php->p_filesz, offset, UIO_USERSPACE,
1036			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1037			    curthread);
1038			if (error != 0)
1039				break;
1040			offset += php->p_filesz;
1041			php++;
1042		}
1043	}
1044	free(hdr, M_TEMP);
1045
1046	return (error);
1047}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	boolean_t ignore_entry;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include memory segments in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((object = entry->object.vm_object) == NULL)
			continue;

		/* Ignore memory-mapped devices and such things. */
		VM_OBJECT_LOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		ignore_entry = object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
		VM_OBJECT_UNLOCK(object);
		if (ignore_entry)
			continue;

		(*func)(entry, closure);
	}
	vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td));
}

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
#endif

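/*
 * Build (or, when dst is NULL, merely size) the core file header area:
 * the ELF header, one program header per segment plus one for the note
 * segment, and the NT_PRPSINFO, NT_PRSTATUS and NT_FPREGSET notes for
 * every thread in the process.
 */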
static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = EM_386;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

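/*
 * Emit one ELF note at *off (or just advance *off when dst is NULL):
 * the fixed Elf_Note header followed by the name and desc, each padded
 * to Elf_Size alignment.
 */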
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Try to find the appropriate ABI-note section for checknote, and fetch
 * the osreldate for the binary from the ELF OSABI-note.  Only the first
 * page of the image is searched, the same as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel)
{
	const Elf_Note *note, *note0, *note_end;
	const Elf_Phdr *phdr, *pnote;
	const Elf_Ehdr *hdr;
	const char *note_name;
	int i;

	pnote = NULL;
	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE) {
			pnote = &phdr[i];
			break;
		}
	}

	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
		return (FALSE);

	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
	note_end = (const Elf_Note *)(imgp->image_header +
	    pnote->p_offset + pnote->p_filesz);
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr))
			return (FALSE);
		if (note->n_namesz != checknote->hdr.n_namesz ||
		    note->n_descsz != checknote->hdr.n_descsz ||
		    note->n_type != checknote->hdr.n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (strncmp(checknote->vendor, note_name,
		    checknote->hdr.n_namesz) != 0)
			goto nextnote;

		/*
		 * Fetch the osreldate for the binary
		 * from the ELF OSABI-note if necessary.
		 */
		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
		    checknote->trans_osrel != NULL)
			return (checknote->trans_osrel(note, osrel));
		return (TRUE);

nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}

	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));