imgact_elf.c revision 153585
/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/imgact_elf.c 153585 2005-12-20 23:42:18Z alc $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>
#endif

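/*
 * Offset within e_ident[] where FreeBSD 3.x stored its string brand;
 * get_brandinfo() below still compares this slot against each brand's
 * compat_3_brand.
 */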
#define OLD_EI_BRAND	8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

int __elfN(can_exec_dyn) = 0;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
	can_exec_dyn, CTLFLAG_RW, &__elfN(can_exec_dyn), 0,
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " can exec shared libraries");

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

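/* ABI brands registered via __elfN(insert_brand_entry)(). */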
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, and (3) path of the `interp_path'
	 * field.  We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}

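/*
 * Map [start, end) from "object" at "offset" into "map".  map_partial()
 * above copies, rather than maps, any unaligned head or tail fragment;
 * a file offset that is not page aligned forces copying the whole range.
 */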
static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned. This means we have
			 * to copy the data. Sigh.
			 */
			rv = vm_map_find(map, NULL, 0, &start, end - start,
			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
			if (rv)
				return (rv);
			if (object == NULL)
				return (KERN_SUCCESS);
			for (; start < end; start += sz) {
				sf = vm_imgact_map_page(object, offset);
				if (sf == NULL)
					return (KERN_FAILURE);
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE - off)
					sz = PAGE_SIZE - off;
				error = copyout((caddr_t)sf_buf_kva(sf) + off,
				    (caddr_t)start, sz);
				vm_imgact_unmap_page(sf);
				if (error) {
					return (KERN_FAILURE);
				}
				offset += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, VM_PROT_ALL, cow);
			vm_map_unlock(map);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
	struct vnode *vp, vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

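/*
 * trunc_page()/round_page() generalized to an arbitrary power-of-two
 * page size "ps", e.g. trunc_page_ps(0x12345, 0x1000) == 0x12000.
 */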
#define trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
				      object,
				      file_addr,	/* file offset */
				      map_addr,		/* virtual start */
				      map_addr + map_len,/* virtual end */
				      prot,
				      cow);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (error);
		}
	}

	/*
	 * set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len),  prot, FALSE);

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int vfslocked, error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	/* XXXKSE */
	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
	    curthread);
	vfslocked = 0;
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	vfslocked = NDHASGIANT(nd);
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_vflag |= VV_TEXT;

	imgp->object = nd->ni_vp->v_object;
	vm_object_reference(imgp->object);

	VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
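	/*
	 * A shared object is relocated to the caller-supplied base;
	 * an ET_EXEC interpreter loads at its linked address (rbase 0).
	 */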
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now      */
	/*    (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
  				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
  				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
  				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(p, vmspace,
			    nd->ni_vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
  				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->object)
		vm_object_deallocate(imgp->object);

	if (nd->ni_vp)
		vrele(nd->ni_vp);

	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error = 0, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header ?
	 */
	if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC
	&& (!__elfN(can_exec_dyn) || hdr->e_type != ET_DYN)))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	VOP_UNLOCK(imgp->vp, 0, td);

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
	  	case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	exec_new_vmspace(imgp, sv);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
  				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
  				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
  				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(imgp->proc, vmspace,
			    imgp->vp, imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
  				goto fail;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR: 	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		error = ENOMEM;
		goto fail;
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL && brand_info->emul_path != NULL &&
	    brand_info->emul_path[0] != '\0') {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
		    interp);
		error = __elfN(load_file)(imgp->proc, path, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		free(path, M_TEMP);
		if (error == 0)
			interp = NULL;
	}
	if (interp != NULL) {
		error = __elfN(load_file)(imgp->proc, interp, &addr,
		    &imgp->entry_addr, sv->sv_pagesize);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			goto fail;
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
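	/*
	 * The aux vector starts past argv[] and envp[]: argc + envc
	 * string pointers plus the two NULL terminators of those arrays.
	 */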
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

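	/* Refuse to write a core that would exceed the caller's size limit. */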
	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

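		/*
		 * Skip the first program header; it is the PT_NOTE segment
		 * written by puthdr().  The remaining headers describe the
		 * writable segments whose contents are dumped here.
		 */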
		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td)); /* XXXKSE */
}

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
#endif

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = EM_386;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

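	/*
	 * An ELF note is this fixed-size header followed by the name and
	 * the descriptor, each padded to Elf_Size alignment.  On the sizing
	 * pass (dst == NULL) nothing is copied, but *off still advances so
	 * the caller learns the total note size.
	 */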
	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));