kern_exec.c revision 102561
1/*
2 * Copyright (c) 1993, David Greenman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/kern/kern_exec.c 102561 2002-08-29 06:17:48Z jake $
27 */
28
29#include "opt_ktrace.h"
30#include "opt_mac.h"
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/sysproto.h>
37#include <sys/signalvar.h>
38#include <sys/kernel.h>
39#include <sys/mac.h>
40#include <sys/mount.h>
41#include <sys/filedesc.h>
42#include <sys/fcntl.h>
43#include <sys/acct.h>
44#include <sys/exec.h>
45#include <sys/imgact.h>
46#include <sys/imgact_elf.h>
47#include <sys/wait.h>
48#include <sys/malloc.h>
49#include <sys/proc.h>
50#include <sys/pioctl.h>
51#include <sys/namei.h>
52#include <sys/sysent.h>
53#include <sys/shm.h>
54#include <sys/sysctl.h>
55#include <sys/user.h>
56#include <sys/vnode.h>
57#ifdef KTRACE
58#include <sys/ktrace.h>
59#endif
60
61#include <vm/vm.h>
62#include <vm/vm_param.h>
63#include <vm/pmap.h>
64#include <vm/vm_page.h>
65#include <vm/vm_map.h>
66#include <vm/vm_kern.h>
67#include <vm/vm_extern.h>
68#include <vm/vm_object.h>
69#include <vm/vm_pager.h>
70
71#include <machine/reg.h>
72
73MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
74
75static MALLOC_DEFINE(M_ATEXEC, "atexec", "atexec callback");
76
77/*
78 * callout list for things to do at exec time
79 */
80struct execlist {
81	execlist_fn function;
82	TAILQ_ENTRY(execlist) next;
83};
84
85TAILQ_HEAD(exec_list_head, execlist);
86static struct exec_list_head exec_list = TAILQ_HEAD_INITIALIZER(exec_list);
87
88static register_t *exec_copyout_strings(struct image_params *);
89
90/* XXX This should be vm_size_t. */
91static u_long ps_strings = PS_STRINGS;
92SYSCTL_ULONG(_kern, KERN_PS_STRINGS, ps_strings, CTLFLAG_RD, &ps_strings,
93    0, "");
94
95/* XXX This should be vm_size_t. */
96static u_long usrstack = USRSTACK;
97SYSCTL_ULONG(_kern, KERN_USRSTACK, usrstack, CTLFLAG_RD, &usrstack, 0, "");
98
99u_long ps_arg_cache_limit = PAGE_SIZE / 16;
100SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
101    &ps_arg_cache_limit, 0, "");
102
103int ps_argsopen = 1;
104SYSCTL_INT(_kern, OID_AUTO, ps_argsopen, CTLFLAG_RW, &ps_argsopen, 0, "");
105
106#ifdef __ia64__
107/* XXX HACK */
108static int regstkpages = 256;
109SYSCTL_INT(_machdep, OID_AUTO, regstkpages, CTLFLAG_RW, &regstkpages, 0, "");
110#endif
111
112/*
113 * Each of the items is a pointer to a `const struct execsw', hence the
114 * double pointer here.
115 */
116static const struct execsw **execsw;
117
118#ifndef _SYS_SYSPROTO_H_
119struct execve_args {
120        char    *fname;
121        char    **argv;
122        char    **envv;
123};
124#endif
125
126/*
127 * execve() system call.
128 *
129 * MPSAFE
130 */
131int
132execve(td, uap)
133	struct thread *td;
134	register struct execve_args *uap;
135{
136	struct proc *p = td->td_proc;
137	struct nameidata nd, *ndp;
138	struct ucred *newcred = NULL, *oldcred;
139	struct uidinfo *euip;
140	register_t *stack_base;
141	int error, len, i;
142	struct image_params image_params, *imgp;
143	struct vattr attr;
144	int (*img_first)(struct image_params *);
145	struct pargs *oldargs = NULL, *newargs = NULL;
146	struct procsig *oldprocsig, *newprocsig;
147#ifdef KTRACE
148	struct vnode *tracevp = NULL;
149#endif
150	struct vnode *textvp = NULL;
151	int credential_changing;
152	int textset;
153
154	imgp = &image_params;
155
156	/*
157	 * Lock the process and set the P_INEXEC flag to indicate that
158	 * it should be left alone until we're done here.  This is
159	 * necessary to avoid race conditions - e.g. in ptrace() -
160	 * that might allow a local user to illicitly obtain elevated
161	 * privileges.
162	 */
163	PROC_LOCK(p);
164	KASSERT((p->p_flag & P_INEXEC) == 0,
165	    ("%s(): process already has P_INEXEC flag", __func__));
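	/*
	 * If this is a threaded (KSE) process, force it single-threaded
	 * before continuing: the other threads are made to exit and the
	 * process drops out of KSE mode below.
	 */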
166	if (p->p_flag & P_KSES) {
167		if (thread_single(SNGLE_EXIT)) {
168			PROC_UNLOCK(p);
169			return (ERESTART);	/* Try again later. */
170		}
171		/*
172		 * If we get here all other threads are dead,
173		 * so unset the associated flags and lose KSE mode.
174		 */
175		p->p_flag &= ~P_KSES;
176		td->td_flags &= ~TDF_UNBOUND;
177		thread_single_end();
178	}
179	p->p_flag |= P_INEXEC;
180	PROC_UNLOCK(p);
181
182	/*
183	 * Initialize part of the common data
184	 */
185	imgp->proc = p;
186	imgp->uap = uap;
187	imgp->attr = &attr;
188	imgp->argc = imgp->envc = 0;
189	imgp->argv0 = NULL;
190	imgp->entry_addr = 0;
191	imgp->vmspace_destroyed = 0;
192	imgp->interpreted = 0;
193	imgp->interpreter_name[0] = '\0';
194	imgp->auxargs = NULL;
195	imgp->vp = NULL;
196	imgp->object = NULL;
197	imgp->firstpage = NULL;
198	imgp->ps_strings = 0;
199	imgp->auxarg_size = 0;
200
201	/*
202	 * Allocate temporary demand zeroed space for argument and
203	 *	environment strings
204	 */
205	imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX +
206	    PAGE_SIZE);
207	if (imgp->stringbase == NULL) {
208		error = ENOMEM;
209		mtx_lock(&Giant);
210		goto exec_fail;
211	}
212	imgp->stringp = imgp->stringbase;
213	imgp->stringspace = ARG_MAX;
214	imgp->image_header = imgp->stringbase + ARG_MAX;
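	/*
	 * Layout of the temporary buffer: the first ARG_MAX bytes hold the
	 * copied-in argument and environment strings, and the trailing page
	 * (image_header) is where exec_map_first_page() maps the first page
	 * of the executable.
	 */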
215
216	/*
217	 * Translate the file name. namei() returns a vnode pointer
218	 *	in ni_vp among other things.
219	 */
220	ndp = &nd;
221	NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
222	    UIO_USERSPACE, uap->fname, td);
223
224	mtx_lock(&Giant);
225interpret:
226
227	error = namei(ndp);
228	if (error) {
229		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
230		    ARG_MAX + PAGE_SIZE);
231		goto exec_fail;
232	}
233
234	imgp->vp = ndp->ni_vp;
235	imgp->fname = uap->fname;
236
237	/*
238	 * Check file permissions (also 'opens' file)
239	 */
240	error = exec_check_permissions(imgp);
241	if (error)
242		goto exec_fail_dealloc;
243
244	if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
245		vm_object_reference(imgp->object);
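	/*
	 * The reference taken here keeps the vnode's VM object alive while
	 * the image is activated; it is dropped again in the cleanup paths
	 * below.
	 */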
246
247	/*
248	 * Set VV_TEXT now so no one can write to the executable while we're
249	 * activating it.
250	 *
251	 * Remember if this was set before and unset it in case this is not
252	 * actually an executable image.
253	 */
254	textset = imgp->vp->v_vflag & VV_TEXT;
255	imgp->vp->v_vflag |= VV_TEXT;
256
257	error = exec_map_first_page(imgp);
258	if (error)
259		goto exec_fail_dealloc;
260
261	/*
262	 *	If the current process has a special image activator it
263	 *	wants to try first, call it.  For example, it may emulate
264	 *	shell scripts differently.
265	 */
266	error = -1;
267	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
268		error = img_first(imgp);
269
270	/*
271	 *	Loop through the list of image activators, calling each one.
272	 *	An activator returns -1 if there is no match, 0 on success,
273	 *	and an error otherwise.
274	 */
275	for (i = 0; error == -1 && execsw[i]; ++i) {
276		if (execsw[i]->ex_imgact == NULL ||
277		    execsw[i]->ex_imgact == img_first) {
278			continue;
279		}
280		error = (*execsw[i]->ex_imgact)(imgp);
281	}
282
283	if (error) {
284		if (error == -1) {
285			if (textset == 0)
286				imgp->vp->v_vflag &= ~VV_TEXT;
287			error = ENOEXEC;
288		}
289		goto exec_fail_dealloc;
290	}
291
292	/*
293	 * Special interpreter operation: clean up and loop back up to try
294	 * to activate the interpreter.
295	 */
296	if (imgp->interpreted) {
297		exec_unmap_first_page(imgp);
298		/*
299		 * VV_TEXT needs to be unset for scripts.  There is a short
300		 * period before we determine that something is a script where
301		 * VV_TEXT will be set. The vnode lock is held over this
302		 * entire period so nothing should illegitimately be blocked.
303		 */
304		imgp->vp->v_vflag &= ~VV_TEXT;
305		/* free name buffer and old vnode */
306		NDFREE(ndp, NDF_ONLY_PNBUF);
307		vput(ndp->ni_vp);
308		vm_object_deallocate(imgp->object);
309		imgp->object = NULL;
310		/* set new name to that of the interpreter */
311		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
312		    UIO_SYSSPACE, imgp->interpreter_name, td);
313		goto interpret;
314	}
315
316	/*
317	 * Copy out strings (args and env) and initialize stack base
318	 */
319	if (p->p_sysent->sv_copyout_strings)
320		stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
321	else
322		stack_base = exec_copyout_strings(imgp);
323
324	/*
325	 * If a custom stack fixup routine is present for this process,
326	 * let it do the stack setup.  Otherwise, push the argument count
327	 * onto the stack as the first item.
328	 */
329	if (p->p_sysent->sv_fixup)
330		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
331	else
332		suword(--stack_base, imgp->argc);
333
334	/*
335	 * For security and other reasons, the file descriptor table cannot
336	 * be shared after an exec.
337	 */
338	FILEDESC_LOCK(p->p_fd);
339	if (p->p_fd->fd_refcnt > 1) {
340		struct filedesc *tmp;
341
342		tmp = fdcopy(td);
343		FILEDESC_UNLOCK(p->p_fd);
344		fdfree(td);
345		p->p_fd = tmp;
346	} else
347		FILEDESC_UNLOCK(p->p_fd);
348
349	/*
350	 * Malloc things before we need locks.
351	 */
352	newcred = crget();
353	euip = uifind(attr.va_uid);
354	i = imgp->endargs - imgp->stringbase;
355	if (ps_arg_cache_limit >= i + sizeof(struct pargs))
356		newargs = pargs_alloc(i);
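	/*
	 * i is the total length of the argument strings; they are cached in
	 * p->p_args further down only if they fit within ps_arg_cache_limit.
	 */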
357
358	/* close files on exec */
359	fdcloseexec(td);
360
361	/* Get a reference to the vnode prior to locking the proc */
362	VREF(ndp->ni_vp);
363
364	/*
365	 * For security and other reasons, signal handlers cannot
366	 * be shared after an exec. The new process gets a copy of the old
367	 * handlers. In execsigs(), the new process will have its signals
368	 * reset.
369	 */
370	PROC_LOCK(p);
371	mp_fixme("procsig needs a lock");
372	if (p->p_procsig->ps_refcnt > 1) {
373		oldprocsig = p->p_procsig;
374		PROC_UNLOCK(p);
375		MALLOC(newprocsig, struct procsig *, sizeof(struct procsig),
376		    M_SUBPROC, M_WAITOK);
377		bcopy(oldprocsig, newprocsig, sizeof(*newprocsig));
378		newprocsig->ps_refcnt = 1;
379		oldprocsig->ps_refcnt--;
380		PROC_LOCK(p);
381		p->p_procsig = newprocsig;
382		if (p->p_sigacts == &p->p_uarea->u_sigacts)
383			panic("shared procsig but private sigacts?");
384
385		p->p_uarea->u_sigacts = *p->p_sigacts;
386		p->p_sigacts = &p->p_uarea->u_sigacts;
387	}
388	/* Stop profiling */
389	stopprofclock(p);
390
391	/* reset caught signals */
392	execsigs(p);
393
394	/* name this process - nameiexec(p, ndp) */
395	len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
396	bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
397	p->p_comm[len] = 0;
398
399	/*
400	 * Mark the process as execed, wake up the parent that vforked
401	 * (if any) and tell it that it now has its own resources back.
402	 */
403	p->p_flag |= P_EXEC;
404	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
405		p->p_flag &= ~P_PPWAIT;
406		wakeup(p->p_pptr);
407	}
408
409	/*
410	 * Implement image setuid/setgid.
411	 *
412	 * Don't honor setuid/setgid if the filesystem prohibits it or if
413	 * the process is being traced.
414	 */
415	oldcred = p->p_ucred;
416	credential_changing = 0;
417	credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
418	    attr.va_uid;
419	credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
420	    attr.va_gid;
421
422	if (credential_changing &&
423	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
424	    (p->p_flag & P_TRACED) == 0) {
425		/*
426		 * Turn off syscall tracing for set-id programs, except for
427		 * root.  Record any set-id flags first to make sure that
428		 * we do not regain any tracing during a possible block.
429		 */
430		setsugid(p);
431#ifdef KTRACE
432		if (p->p_tracep && suser_cred(oldcred, PRISON_ROOT)) {
433			mtx_lock(&ktrace_mtx);
434			p->p_traceflag = 0;
435			tracevp = p->p_tracep;
436			p->p_tracep = NULL;
437			mtx_unlock(&ktrace_mtx);
438		}
439#endif
440		/* Close any file descriptors 0..2 that reference procfs */
441		setugidsafety(td);
442		/* Make sure file descriptors 0..2 are in use.  */
443		error = fdcheckstd(td);
444		if (error != 0)
445			goto done1;
446		/*
447		 * Set the new credentials.
448		 */
449		crcopy(newcred, oldcred);
450		if (attr.va_mode & VSUID)
451			change_euid(newcred, euip);
452		if (attr.va_mode & VSGID)
453			change_egid(newcred, attr.va_gid);
454		/*
455		 * Implement correct POSIX saved-id behavior.
456		 */
457		change_svuid(newcred, newcred->cr_uid);
458		change_svgid(newcred, newcred->cr_gid);
459		p->p_ucred = newcred;
460		newcred = NULL;
461	} else {
462		if (oldcred->cr_uid == oldcred->cr_ruid &&
463		    oldcred->cr_gid == oldcred->cr_rgid)
464			p->p_flag &= ~P_SUGID;
465		/*
466		 * Implement correct POSIX saved-id behavior.
467		 *
468		 * XXX: It's not clear that the existing behavior is
469		 * POSIX-compliant.  A number of sources indicate that the
470		 * saved uid/gid should only be updated if the new ruid is
471		 * not equal to the old ruid, or the new euid is not equal
472		 * to the old euid and the new euid is not equal to the old
473		 * ruid.  The FreeBSD code always updates the saved uid/gid.
474		 * Also, this code uses the new (replaced) euid and egid as
475		 * the source, which may or may not be the right ones to use.
476		 */
477		if (oldcred->cr_svuid != oldcred->cr_uid ||
478		    oldcred->cr_svgid != oldcred->cr_gid) {
479			crcopy(newcred, oldcred);
480			change_svuid(newcred, newcred->cr_uid);
481			change_svgid(newcred, newcred->cr_gid);
482			p->p_ucred = newcred;
483			newcred = NULL;
484		}
485	}
486
487	/*
488	 * Store the vp for use in procfs.  This vnode was referenced prior
489	 * to locking the proc lock.
490	 */
491	textvp = p->p_textvp;
492	p->p_textvp = ndp->ni_vp;
493
494	/*
495	 * Notify others that we exec'd, and clear the P_INEXEC flag
496	 * as we're now a bona fide freshly-execed process.
497	 */
498	KNOTE(&p->p_klist, NOTE_EXEC);
499	p->p_flag &= ~P_INEXEC;
500
501	/*
502	 * If tracing the process, trap to debugger so breakpoints
503	 * can be set before the program executes.
504	 */
505	_STOPEVENT(p, S_EXEC, 0);
506
507	if (p->p_flag & P_TRACED)
508		psignal(p, SIGTRAP);
509
510	/* clear "fork but no exec" flag, as we _are_ execing */
511	p->p_acflag &= ~AFORK;
512
513	/* Free any previous argument cache */
514	oldargs = p->p_args;
515	p->p_args = NULL;
516
517	/* Set values passed into the program in registers. */
518	if (p->p_sysent->sv_setregs)
519		(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
520		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
521	else
522		exec_setregs(td, imgp->entry_addr,
523		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
524
525	/* Cache arguments if they fit inside our allowance */
526	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
527		bcopy(imgp->stringbase, newargs->ar_args, i);
528		p->p_args = newargs;
529		newargs = NULL;
530	}
531done1:
532	PROC_UNLOCK(p);
533
534
535	/*
536	 * Free any resources malloc'd earlier that we didn't use.
537	 */
538	uifree(euip);
539	if (newcred == NULL)
540		crfree(oldcred);
541	else
542		crfree(newcred);
543	/*
544	 * Handle deferred decrement of ref counts.
545	 */
546	if (textvp != NULL)
547		vrele(textvp);
548	if (ndp->ni_vp && error != 0)
549		vrele(ndp->ni_vp);
550#ifdef KTRACE
551	if (tracevp != NULL)
552		vrele(tracevp);
553#endif
554	if (oldargs != NULL)
555		pargs_drop(oldargs);
556	if (newargs != NULL)
557		pargs_drop(newargs);
558
559exec_fail_dealloc:
560
561	/*
562	 * free various allocated resources
563	 */
564	if (imgp->firstpage)
565		exec_unmap_first_page(imgp);
566
567	if (imgp->stringbase != NULL)
568		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
569		    ARG_MAX + PAGE_SIZE);
570
571	if (imgp->vp) {
572		NDFREE(ndp, NDF_ONLY_PNBUF);
573		vput(imgp->vp);
574	}
575
576	if (imgp->object)
577		vm_object_deallocate(imgp->object);
578
579	if (error == 0)
580		goto done2;
581
582exec_fail:
583	/* we're done here, clear P_INEXEC */
584	PROC_LOCK(p);
585	p->p_flag &= ~P_INEXEC;
586	PROC_UNLOCK(p);
587
588	if (imgp->vmspace_destroyed) {
589		/* Sorry, there is no process left to return to; exit gracefully. */
590		exit1(td, W_EXITCODE(0, SIGABRT));
591		/* NOT REACHED */
592		error = 0;
593	}
594done2:
595	mtx_unlock(&Giant);
596	return (error);
597}
598
599int
600exec_map_first_page(imgp)
601	struct image_params *imgp;
602{
603	int rv, i;
604	int initial_pagein;
605	vm_page_t ma[VM_INITIAL_PAGEIN];
606	vm_object_t object;
607
608	GIANT_REQUIRED;
609
610	if (imgp->firstpage) {
611		exec_unmap_first_page(imgp);
612	}
613
614	VOP_GETVOBJECT(imgp->vp, &object);
615
616	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
617
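	/*
	 * If the first page is not already fully valid, build a cluster of
	 * up to VM_INITIAL_PAGEIN consecutive pages and have the pager read
	 * them in along with page 0.
	 */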
618	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
619		initial_pagein = VM_INITIAL_PAGEIN;
620		if (initial_pagein > object->size)
621			initial_pagein = object->size;
622		for (i = 1; i < initial_pagein; i++) {
623			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
624				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy)
625					break;
626				if (ma[i]->valid)
627					break;
628				vm_page_busy(ma[i]);
629			} else {
630				ma[i] = vm_page_alloc(object, i,
631				    VM_ALLOC_NORMAL);
632				if (ma[i] == NULL)
633					break;
634			}
635		}
636		initial_pagein = i;
637
638		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
639		ma[0] = vm_page_lookup(object, 0);
640
641		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
642		    (ma[0]->valid == 0)) {
643			if (ma[0]) {
644				vm_page_lock_queues();
645				vm_page_protect(ma[0], VM_PROT_NONE);
646				vm_page_free(ma[0]);
647				vm_page_unlock_queues();
648			}
649			return (EIO);
650		}
651	}
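	/*
	 * Wire the first page so it cannot be reclaimed while the image
	 * header is examined, then map it into kernel space at
	 * imgp->image_header.
	 */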
652	vm_page_lock_queues();
653	vm_page_wire(ma[0]);
654	vm_page_wakeup(ma[0]);
655	vm_page_unlock_queues();
656
657	pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
658	imgp->firstpage = ma[0];
659
660	return (0);
661}
662
663void
664exec_unmap_first_page(imgp)
665	struct image_params *imgp;
666{
667	GIANT_REQUIRED;
668
669	if (imgp->firstpage) {
670		pmap_qremove((vm_offset_t)imgp->image_header, 1);
671		vm_page_lock_queues();
672		vm_page_unwire(imgp->firstpage, 1);
673		vm_page_unlock_queues();
674		imgp->firstpage = NULL;
675	}
676}
677
678/*
679 * Destroy the old address space and allocate a new stack.
680 *	The new stack is only SGROWSIZ large because it is grown
681 *	automatically in trap.c.
682 */
683int
684exec_new_vmspace(imgp, minuser, maxuser, stack_addr)
685	struct image_params *imgp;
686	vm_offset_t minuser, maxuser, stack_addr;
687{
688	int error;
689	struct execlist *ep;
690	struct proc *p = imgp->proc;
691	struct vmspace *vmspace = p->p_vmspace;
692
693	GIANT_REQUIRED;
694
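	/*
	 * stack_addr is passed in as the top of the new stack; move it down
	 * by maxssiz so it refers to the lowest address the stack may occupy.
	 */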
695	stack_addr = stack_addr - maxssiz;
696
697	imgp->vmspace_destroyed = 1;
698
699	/*
700	 * Perform functions registered with at_exec().
701	 */
702	TAILQ_FOREACH(ep, &exec_list, next)
703		(*ep->function)(p);
704
705	/*
706	 * Blow away the entire process VM if the address space is not
707	 * shared; otherwise, create a new VM space so that other threads
708	 * are not disrupted.
709	 */
710	if (vmspace->vm_refcnt == 1 &&
711	    vm_map_min(&vmspace->vm_map) == minuser &&
712	    vm_map_max(&vmspace->vm_map) == maxuser) {
713		if (vmspace->vm_shm)
714			shmexit(p);
715		pmap_remove_pages(vmspace_pmap(vmspace), minuser, maxuser);
716		vm_map_remove(&vmspace->vm_map, minuser, maxuser);
717	} else {
718		vmspace_exec(p, minuser, maxuser);
719		vmspace = p->p_vmspace;
720	}
721
722	/* Allocate a new stack */
723	error = vm_map_stack(&vmspace->vm_map, stack_addr, (vm_size_t)maxssiz,
724	    VM_PROT_ALL, VM_PROT_ALL, 0);
725	if (error)
726		return (error);
727
728#ifdef __ia64__
729	{
730		/*
731		 * Allocate backing store. We really need something
732		 * similar to vm_map_stack which can allow the backing
733		 * store to grow upwards. This will do for now.
734		 */
735		vm_offset_t bsaddr;
736		bsaddr = USRSTACK - 2 * maxssiz;
737		error = vm_map_find(&vmspace->vm_map, 0, 0, &bsaddr,
738		    regstkpages * PAGE_SIZE, 0, VM_PROT_ALL, VM_PROT_ALL, 0);
739		FIRST_THREAD_IN_PROC(p)->td_md.md_bspstore = bsaddr;
740	}
741#endif
742
743	/* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
744	 * VM_STACK case, but they are still used to monitor the size of the
745	 * process stack so we can check the stack rlimit.
746	 */
747	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
748	vmspace->vm_maxsaddr = (char *)USRSTACK - maxssiz;
749
750	return (0);
751}
752
753/*
754 * Copy out argument and environment strings from the old process
755 *	address space into the temporary string buffer.
756 */
757int
758exec_extract_strings(imgp)
759	struct image_params *imgp;
760{
761	char	**argv, **envv;
762	char	*argp, *envp;
763	int	error;
764	size_t	length;
765
766	/*
767	 * extract arguments first
768	 */
769
770	argv = imgp->uap->argv;
771
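	/*
	 * If an image activator supplied a replacement argv[0]
	 * (imgp->argv0), it is copied in below instead of the user-supplied
	 * first argument.
	 */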
772	if (argv) {
773		argp = (caddr_t)(intptr_t)fuword(argv);
774		if (argp == (caddr_t)-1)
775			return (EFAULT);
776		if (argp)
777			argv++;
778		if (imgp->argv0)
779			argp = imgp->argv0;
780		if (argp) {
781			do {
782				if (argp == (caddr_t)-1)
783					return (EFAULT);
784				if ((error = copyinstr(argp, imgp->stringp,
785				    imgp->stringspace, &length))) {
786					if (error == ENAMETOOLONG)
787						return (E2BIG);
788					return (error);
789				}
790				imgp->stringspace -= length;
791				imgp->stringp += length;
792				imgp->argc++;
793			} while ((argp = (caddr_t)(intptr_t)fuword(argv++)));
794		}
795	}
796
797	imgp->endargs = imgp->stringp;
798
799	/*
800	 * extract environment strings
801	 */
802
803	envv = imgp->uap->envv;
804
805	if (envv) {
806		while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
807			if (envp == (caddr_t)-1)
808				return (EFAULT);
809			if ((error = copyinstr(envp, imgp->stringp,
810			    imgp->stringspace, &length))) {
811				if (error == ENAMETOOLONG)
812					return (E2BIG);
813				return (error);
814			}
815			imgp->stringspace -= length;
816			imgp->stringp += length;
817			imgp->envc++;
818		}
819	}
820
821	return (0);
822}
823
824/*
825 * Copy strings out to the new process address space, constructing
826 *	new arg and env vector tables. Return a pointer to the base
827 *	so that it can be used as the initial stack pointer.
828 */
829register_t *
830exec_copyout_strings(imgp)
831	struct image_params *imgp;
832{
833	int argc, envc;
834	char **vectp;
835	char *stringp, *destp;
836	register_t *stack_base;
837	struct ps_strings *arginfo;
838	struct proc *p;
839	int szsigcode;
840
841	/*
842	 * Calculate string base and vector table pointers.
843	 * Also deal with signal trampoline code for this exec type.
844	 */
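	/*
	 * Resulting layout at the top of the new stack, from high to low
	 * addresses: the ps_strings struct, the signal trampoline (sigcode),
	 * spare space, the copied argument and environment strings, and
	 * finally the argv/envp pointer vectors; stack_base ends up pointing
	 * at the start of the vectors.
	 */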
845	p = imgp->proc;
846	szsigcode = 0;
847	arginfo = (struct ps_strings *)PS_STRINGS;
848	if (p->p_sysent->sv_szsigcode != NULL)
849		szsigcode = *(p->p_sysent->sv_szsigcode);
850	destp =	(caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
851	    roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
852
853	/*
854	 * install sigcode
855	 */
856	if (szsigcode)
857		copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
858		    szsigcode), szsigcode);
859
860	/*
861	 * If we have a valid auxargs ptr, prepare some room
862	 * on the stack.
863	 */
864	if (imgp->auxargs) {
865		/*
866		 * 'AT_COUNT * 2' is the default size of the ELF auxargs data,
867		 * retained for compatibility with older code.
868		 */
869		imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
870		    (AT_COUNT * 2);
871		/*
872		 * The '+ 2' is for the null pointers at the end of each of
873		 * the arg and env vector sets, and imgp->auxarg_size is room
874		 * for the arguments of the runtime loader.
875		 */
876		vectp = (char **)(destp - (imgp->argc + imgp->envc + 2 +
877		    imgp->auxarg_size) * sizeof(char *));
878
879	} else
880		/*
881		 * The '+ 2' is for the null pointers at the end of each of
882		 * the arg and env vector sets
883		 */
884		vectp = (char **)(destp - (imgp->argc + imgp->envc + 2) *
885		    sizeof(char *));
886
887	/*
888	 * vectp also becomes our initial stack base
889	 */
890	stack_base = (register_t *)vectp;
891
892	stringp = imgp->stringbase;
893	argc = imgp->argc;
894	envc = imgp->envc;
895
896	/*
897	 * Copy out strings - arguments and environment.
898	 */
899	copyout(stringp, destp, ARG_MAX - imgp->stringspace);
900
901	/*
902	 * Fill in "ps_strings" struct for ps, w, etc.
903	 */
904	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
905	suword(&arginfo->ps_nargvstr, argc);
906
907	/*
908	 * Fill in argument portion of vector table.
909	 */
910	for (; argc > 0; --argc) {
911		suword(vectp++, (long)(intptr_t)destp);
912		while (*stringp++ != 0)
913			destp++;
914		destp++;
915	}
916
917	/* a null vector table pointer separates the argp's from the envp's */
918	suword(vectp++, 0);
919
920	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
921	suword(&arginfo->ps_nenvstr, envc);
922
923	/*
924	 * Fill in environment portion of vector table.
925	 */
926	for (; envc > 0; --envc) {
927		suword(vectp++, (long)(intptr_t)destp);
928		while (*stringp++ != 0)
929			destp++;
930		destp++;
931	}
932
933	/* end of vector table is a null pointer */
934	suword(vectp, 0);
935
936	return (stack_base);
937}
938
939/*
940 * Check permissions of file to execute.
941 *	Called with imgp->vp locked.
942 *	Return 0 for success or error code on failure.
943 */
944int
945exec_check_permissions(imgp)
946	struct image_params *imgp;
947{
948	struct vnode *vp = imgp->vp;
949	struct vattr *attr = imgp->attr;
950	struct thread *td;
951	int error;
952
953	td = curthread;			/* XXXKSE */
954
955#ifdef MAC
956	error = mac_check_vnode_exec(td->td_ucred, imgp->vp);
957	if (error)
958		return (error);
959#endif
960
961	/* Get file attributes */
962	error = VOP_GETATTR(vp, attr, td->td_ucred, td);
963	if (error)
964		return (error);
965
966	/*
967	 * 1) Check if file execution is disabled for the filesystem that this
968	 *	file resides on.
969	 * 2) Ensure that at least one execute bit is on - otherwise root
970	 *	will always succeed, and we don't want that to happen unless
971	 *	the file really is executable.
972	 * 3) Ensure that the file is a regular file.
973	 */
974	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
975	    ((attr->va_mode & 0111) == 0) ||
976	    (attr->va_type != VREG))
977		return (EACCES);
978
979	/*
980	 * Zero length files can't be exec'd
981	 */
982	if (attr->va_size == 0)
983		return (ENOEXEC);
984
985	/*
986	 * Check for execute permission on the file based on current credentials.
987	 */
988	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
989	if (error)
990		return (error);
991
992	/*
993	 * Check number of open-for-writes on the file and deny execution
994	 * if there are any.
995	 */
996	if (vp->v_writecount)
997		return (ETXTBSY);
998
999	/*
1000	 * Call filesystem specific open routine (which does nothing in the
1001	 * general case).
1002	 */
1003	error = VOP_OPEN(vp, FREAD, td->td_ucred, td);
1004	return (error);
1005}
1006
1007/*
1008 * Exec handler registration
1009 */
1010int
1011exec_register(execsw_arg)
1012	const struct execsw *execsw_arg;
1013{
1014	const struct execsw **es, **xs, **newexecsw;
1015	int count = 2;	/* New slot and trailing NULL */
1016
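	/*
	 * Build a new NULL-terminated table holding the existing entries
	 * plus the new one, then swap it in and free the old table.
	 */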
1017	if (execsw)
1018		for (es = execsw; *es; es++)
1019			count++;
1020	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1021	if (newexecsw == NULL)
1022		return (ENOMEM);
1023	xs = newexecsw;
1024	if (execsw)
1025		for (es = execsw; *es; es++)
1026			*xs++ = *es;
1027	*xs++ = execsw_arg;
1028	*xs = NULL;
1029	if (execsw)
1030		free(execsw, M_TEMP);
1031	execsw = newexecsw;
1032	return (0);
1033}
1034
1035int
1036exec_unregister(execsw_arg)
1037	const struct execsw *execsw_arg;
1038{
1039	const struct execsw **es, **xs, **newexecsw;
1040	int count = 1;
1041
1042	if (execsw == NULL)
1043		panic("unregister with no handlers left?\n");
1044
1045	for (es = execsw; *es; es++) {
1046		if (*es == execsw_arg)
1047			break;
1048	}
1049	if (*es == NULL)
1050		return (ENOENT);
1051	for (es = execsw; *es; es++)
1052		if (*es != execsw_arg)
1053			count++;
1054	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1055	if (newexecsw == NULL)
1056		return (ENOMEM);
1057	xs = newexecsw;
1058	for (es = execsw; *es; es++)
1059		if (*es != execsw_arg)
1060			*xs++ = *es;
1061	*xs = NULL;
1062	if (execsw)
1063		free(execsw, M_TEMP);
1064	execsw = newexecsw;
1065	return (0);
1066}
1067
1068int
1069at_exec(function)
1070	execlist_fn function;
1071{
1072	struct execlist *ep;
1073
1074#ifdef INVARIANTS
1075	/* Be noisy if the programmer has lost track of things */
1076	if (rm_at_exec(function))
1077		printf("WARNING: exec callout entry (%p) already present\n",
1078		    function);
1079#endif
1080	ep = malloc(sizeof(*ep), M_ATEXEC, M_NOWAIT);
1081	if (ep == NULL)
1082		return (ENOMEM);
1083	ep->function = function;
1084	TAILQ_INSERT_TAIL(&exec_list, ep, next);
1085	return (0);
1086}
1087
1088/*
1089 * Scan the exec callout list for the given item and remove it.
1090 * Returns the number of items removed (0 or 1)
1091 */
1092int
1093rm_at_exec(function)
1094	execlist_fn function;
1095{
1096	struct execlist *ep;
1097
1098	TAILQ_FOREACH(ep, &exec_list, next) {
1099		if (ep->function == function) {
1100			TAILQ_REMOVE(&exec_list, ep, next);
1101			free(ep, M_ATEXEC);
1102			return (1);
1103		}
1104	}
1105	return (0);
1106}
1107