kern_exec.c revision 132592
1/*
2 * Copyright (c) 1993, David Greenman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/kern_exec.c 132592 2004-07-24 04:57:41Z julian $");
29
30#include "opt_ktrace.h"
31#include "opt_mac.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/eventhandler.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/sysproto.h>
39#include <sys/signalvar.h>
40#include <sys/kernel.h>
41#include <sys/mac.h>
42#include <sys/mount.h>
43#include <sys/filedesc.h>
44#include <sys/fcntl.h>
45#include <sys/acct.h>
46#include <sys/exec.h>
47#include <sys/imgact.h>
48#include <sys/imgact_elf.h>
49#include <sys/wait.h>
50#include <sys/malloc.h>
51#include <sys/proc.h>
52#include <sys/pioctl.h>
53#include <sys/namei.h>
54#include <sys/sf_buf.h>
55#include <sys/sysent.h>
56#include <sys/shm.h>
57#include <sys/sysctl.h>
58#include <sys/user.h>
59#include <sys/vnode.h>
60#ifdef KTRACE
61#include <sys/ktrace.h>
62#endif
63
64#include <vm/vm.h>
65#include <vm/vm_param.h>
66#include <vm/pmap.h>
67#include <vm/vm_page.h>
68#include <vm/vm_map.h>
69#include <vm/vm_kern.h>
70#include <vm/vm_extern.h>
71#include <vm/vm_object.h>
72#include <vm/vm_pager.h>
73
74#include <machine/reg.h>
75
76MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
77
78static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
79static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
80static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
81static int kern_execve(struct thread *td, char *fname, char **argv,
82	char **envv, struct mac *mac_p);
83
84/* XXX This should be vm_size_t. */
85SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
86    NULL, 0, sysctl_kern_ps_strings, "LU", "");
87
88/* XXX This should be vm_size_t. */
89SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
90    NULL, 0, sysctl_kern_usrstack, "LU", "");
91
92SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
93    NULL, 0, sysctl_kern_stackprot, "I", "");
94
95u_long ps_arg_cache_limit = PAGE_SIZE / 16;
96SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
97    &ps_arg_cache_limit, 0, "");
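/*
 * Argument strings are cached in p->p_args at exec time only when they,
 * together with the struct pargs header, fit inside ps_arg_cache_limit
 * bytes; larger argument lists are not cached, so consumers such as
 * ps(1) and the kern.proc.args sysctl only see the cached copies when
 * they fit under this limit.
 */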
98
99static int
100sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
101{
102	struct proc *p;
103	int error;
104
105	p = curproc;
106#if defined(__amd64__) || defined(__ia64__)
107	if (req->oldlen == sizeof(unsigned int)) {
108		unsigned int val;
109		val = (unsigned int)p->p_sysent->sv_psstrings;
110		error = SYSCTL_OUT(req, &val, sizeof(val));
111	} else
112#endif
113		error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
114		   sizeof(p->p_sysent->sv_psstrings));
115	return (error);
116}
117
118static int
119sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
120{
121	struct proc *p;
122	int error;
123
124	p = curproc;
125#if defined(__amd64__) || defined(__ia64__)
126	if (req->oldlen == sizeof(unsigned int)) {
127		unsigned int val;
128		val = (unsigned int)p->p_sysent->sv_usrstack;
129		error = SYSCTL_OUT(req, &val, sizeof(val));
130	} else
131#endif
132		error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
133		    sizeof(p->p_sysent->sv_usrstack));
134	return (error);
135}
136
137static int
138sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
139{
140	struct proc *p;
141
142	p = curproc;
143	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
144	    sizeof(p->p_sysent->sv_stackprot)));
145}
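/*
 * The three handlers above back the read-only sysctls kern.ps_strings,
 * kern.usrstack and kern.stackprot, exporting per-ABI values taken from
 * the current process' sysentvec; on amd64/ia64 a 4-byte request is
 * answered with a truncated 32-bit value for 32-bit consumers.  The
 * fragment below is a minimal userland sketch of how these sysctls
 * might be read.  It is kept under "#if 0" because it is an
 * illustration only and not part of the kernel; it assumes nothing
 * beyond the standard sysctlbyname(3) interface.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned long usrstack;
	int stackprot;
	size_t len;

	/* Top of the user stack for this process' ABI. */
	len = sizeof(usrstack);
	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == -1)
		return (1);

	/* Protection applied to newly grown stack pages. */
	len = sizeof(stackprot);
	if (sysctlbyname("kern.stackprot", &stackprot, &len, NULL, 0) == -1)
		return (1);

	printf("usrstack 0x%lx stackprot %#x\n", usrstack, stackprot);
	return (0);
}
#endif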
146
147/*
148 * Each of the items is a pointer to a `const struct execsw', hence the
149 * double pointer here.
150 */
151static const struct execsw **execsw;
152
153#ifndef _SYS_SYSPROTO_H_
154struct execve_args {
155	char    *fname;
156	char    **argv;
157	char    **envv;
158};
159#endif
160
161/*
162 * MPSAFE
163 */
164int
165execve(td, uap)
166	struct thread *td;
167	struct execve_args /* {
168		char *fname;
169		char **argv;
170		char **envv;
171	} */ *uap;
172{
173
174	return (kern_execve(td, uap->fname, uap->argv, uap->envv, NULL));
175}
176
177#ifndef _SYS_SYSPROTO_H_
178struct __mac_execve_args {
179	char	*fname;
180	char	**argv;
181	char	**envv;
182	struct mac	*mac_p;
183};
184#endif
185
186/*
187 * MPSAFE
188 */
189int
190__mac_execve(td, uap)
191	struct thread *td;
192	struct __mac_execve_args /* {
193		char *fname;
194		char **argv;
195		char **envv;
196		struct mac *mac_p;
197	} */ *uap;
198{
199
200#ifdef MAC
201	return (kern_execve(td, uap->fname, uap->argv, uap->envv,
202	    uap->mac_p));
203#else
204	return (ENOSYS);
205#endif
206}
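/*
 * Both system calls above are thin wrappers that funnel into
 * kern_execve(); __mac_execve() additionally carries a MAC label
 * request and returns ENOSYS on kernels built without "options MAC".
 * For illustration only (not compiled here), a minimal userland caller
 * of execve(2) might look like the sketch below.
 */
#if 0
#include <unistd.h>

int
run_ls(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envp[] = { "PATH=/bin:/usr/bin", NULL };

	/* On success execve() does not return; the old image is replaced. */
	return (execve("/bin/ls", argv, envp));
}
#endif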
207
208/*
209 * In-kernel implementation of execve().  All arguments are assumed to be
210 * userspace pointers from the passed thread.
211 *
212 * MPSAFE
213 */
214static int
215kern_execve(td, fname, argv, envv, mac_p)
216	struct thread *td;
217	char *fname;
218	char **argv;
219	char **envv;
220	struct mac *mac_p;
221{
222	struct proc *p = td->td_proc;
223	struct nameidata nd, *ndp;
224	struct ucred *newcred = NULL, *oldcred;
225	struct uidinfo *euip;
226	register_t *stack_base;
227	int error, len, i;
228	struct image_params image_params, *imgp;
229	struct vattr attr;
230	int (*img_first)(struct image_params *);
231	struct pargs *oldargs = NULL, *newargs = NULL;
232	struct sigacts *oldsigacts, *newsigacts;
233#ifdef KTRACE
234	struct vnode *tracevp = NULL;
235	struct ucred *tracecred = NULL;
236#endif
237	struct vnode *textvp = NULL;
238	int credential_changing;
239	int textset;
240#ifdef MAC
241	struct label *interplabel = NULL;
242	int will_transition;
243#endif
244
245	imgp = &image_params;
246
247	/*
248	 * Lock the process and set the P_INEXEC flag to indicate that
249	 * it should be left alone until we're done here.  This is
250	 * necessary to avoid race conditions - e.g. in ptrace() -
251	 * that might allow a local user to illicitly obtain elevated
252	 * privileges.
253	 */
254	PROC_LOCK(p);
255	KASSERT((p->p_flag & P_INEXEC) == 0,
256	    ("%s(): process already has P_INEXEC flag", __func__));
257	if (p->p_flag & P_SA || p->p_numthreads > 1) {
258		if (thread_single(SINGLE_EXIT)) {
259			PROC_UNLOCK(p);
260			mtx_unlock(&Giant);
261			return (ERESTART);	/* Try again later. */
262		}
263		/*
264		 * If we get here all other threads are dead,
265		 * so unset the associated flags and lose KSE mode.
266		 */
267		p->p_flag &= ~P_SA;
268		td->td_mailbox = NULL;
269		td->td_pflags &= ~TDP_SA;
270		thread_single_end();
271	}
272	p->p_flag |= P_INEXEC;
273	PROC_UNLOCK(p);
274
275	/*
276	 * Initialize part of the common data
277	 */
278	imgp->proc = p;
279	imgp->userspace_argv = argv;
280	imgp->userspace_envv = envv;
281	imgp->execlabel = NULL;
282	imgp->attr = &attr;
283	imgp->argc = imgp->envc = 0;
284	imgp->argv0 = NULL;
285	imgp->entry_addr = 0;
286	imgp->vmspace_destroyed = 0;
287	imgp->interpreted = 0;
288	imgp->interpreter_name[0] = '\0';
289	imgp->auxargs = NULL;
290	imgp->vp = NULL;
291	imgp->object = NULL;
292	imgp->firstpage = NULL;
293	imgp->ps_strings = 0;
294	imgp->auxarg_size = 0;
295
296#ifdef MAC
297	error = mac_execve_enter(imgp, mac_p);
298	if (error) {
299		mtx_lock(&Giant);
300		goto exec_fail;
301	}
302#endif
303
304	/*
305	 * Allocate temporary demand zeroed space for argument and
306	 *	environment strings
307	 */
308	imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX);
309	if (imgp->stringbase == NULL) {
310		error = ENOMEM;
311		mtx_lock(&Giant);
312		goto exec_fail;
313	}
314	imgp->stringp = imgp->stringbase;
315	imgp->stringspace = ARG_MAX;
316	imgp->image_header = NULL;
317
318	/*
319	 * Translate the file name. namei() returns a vnode pointer
320	 *	in ni_vp among other things.
321	 */
322	ndp = &nd;
323	NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
324	    UIO_USERSPACE, fname, td);
325
326	mtx_lock(&Giant);
327interpret:
328
329	error = namei(ndp);
330	if (error) {
331		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
332		    ARG_MAX);
333		goto exec_fail;
334	}
335
336	imgp->vp = ndp->ni_vp;
337	imgp->fname = fname;
338
339	/*
340	 * Check file permissions (also 'opens' file)
341	 */
342	error = exec_check_permissions(imgp);
343	if (error)
344		goto exec_fail_dealloc;
345
346	if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
347		vm_object_reference(imgp->object);
348
349	/*
350	 * Set VV_TEXT now so no one can write to the executable while we're
351	 * activating it.
352	 *
353	 * Remember if this was set before and unset it in case this is not
354	 * actually an executable image.
355	 */
356	textset = imgp->vp->v_vflag & VV_TEXT;
357	imgp->vp->v_vflag |= VV_TEXT;
358
359	error = exec_map_first_page(imgp);
360	if (error)
361		goto exec_fail_dealloc;
362
363	/*
364	 *	If the current process has a special image activator it
365	 *	wants to try first, call it.   For example, emulating shell
366	 *	scripts differently.
367	 */
368	error = -1;
369	if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
370		error = img_first(imgp);
371
372	/*
373	 *	Loop through the list of image activators, calling each one.
374	 *	An activator returns -1 if there is no match, 0 on success,
375	 *	and an error otherwise.
376	 */
377	for (i = 0; error == -1 && execsw[i]; ++i) {
378		if (execsw[i]->ex_imgact == NULL ||
379		    execsw[i]->ex_imgact == img_first) {
380			continue;
381		}
382		error = (*execsw[i]->ex_imgact)(imgp);
383	}
384
385	if (error) {
386		if (error == -1) {
387			if (textset == 0)
388				imgp->vp->v_vflag &= ~VV_TEXT;
389			error = ENOEXEC;
390		}
391		goto exec_fail_dealloc;
392	}
393
394	/*
395	 * Special interpreter operation: clean up and loop back to try to
396	 * activate the interpreter.
397	 */
398	if (imgp->interpreted) {
399		exec_unmap_first_page(imgp);
400		/*
401		 * VV_TEXT needs to be unset for scripts.  There is a short
402		 * period before we determine that something is a script where
403		 * VV_TEXT will be set. The vnode lock is held over this
404		 * entire period so nothing should illegitimately be blocked.
405		 */
406		imgp->vp->v_vflag &= ~VV_TEXT;
407		/* free name buffer and old vnode */
408		NDFREE(ndp, NDF_ONLY_PNBUF);
409#ifdef MAC
410		interplabel = mac_vnode_label_alloc();
411		mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel);
412#endif
413		vput(ndp->ni_vp);
414		vm_object_deallocate(imgp->object);
415		imgp->object = NULL;
416		/* set new name to that of the interpreter */
417		NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
418		    UIO_SYSSPACE, imgp->interpreter_name, td);
419		goto interpret;
420	}
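	/*
	 * At this point a native image activator has accepted the file.
	 * For an interpreted image (e.g. a "#!/bin/sh" script) the shell
	 * image activator instead records the interpreter path in
	 * imgp->interpreter_name and sets imgp->interpreted, and the
	 * branch above restarts the lookup with the interpreter as the
	 * new executable.
	 */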
421
422	/*
423	 * Copy out strings (args and env) and initialize stack base
424	 */
425	if (p->p_sysent->sv_copyout_strings)
426		stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
427	else
428		stack_base = exec_copyout_strings(imgp);
429
430	/*
431	 * If a custom stack fixup routine is present for this process,
432	 * let it do the stack setup.
433	 * Else stuff the argument count as the first item on the stack.
434	 */
435	if (p->p_sysent->sv_fixup != NULL)
436		(*p->p_sysent->sv_fixup)(&stack_base, imgp);
437	else
438		suword(--stack_base, imgp->argc);
439
440	/*
441	 * For security and other reasons, the file descriptor table cannot
442	 * be shared after an exec.
443	 */
444	FILEDESC_LOCK(p->p_fd);
445	if (p->p_fd->fd_refcnt > 1) {
446		struct filedesc *tmp;
447
448		tmp = fdcopy(td->td_proc->p_fd);
449		FILEDESC_UNLOCK(p->p_fd);
450		fdfree(td);
451		p->p_fd = tmp;
452	} else
453		FILEDESC_UNLOCK(p->p_fd);
454
455	/*
456	 * Malloc things before we need locks.
457	 */
458	newcred = crget();
459	euip = uifind(attr.va_uid);
460	i = imgp->endargs - imgp->stringbase;
461	if (ps_arg_cache_limit >= i + sizeof(struct pargs))
462		newargs = pargs_alloc(i);
463
464	/* close files on exec */
465	fdcloseexec(td);
466
467	/* Get a reference to the vnode prior to locking the proc */
468	VREF(ndp->ni_vp);
469
470	/*
471	 * For security and other reasons, signal handlers cannot
472	 * be shared after an exec. The new process gets a copy of the old
473	 * handlers. In execsigs(), the new process will have its signals
474	 * reset.
475	 */
476	PROC_LOCK(p);
477	if (sigacts_shared(p->p_sigacts)) {
478		oldsigacts = p->p_sigacts;
479		PROC_UNLOCK(p);
480		newsigacts = sigacts_alloc();
481		sigacts_copy(newsigacts, oldsigacts);
482		PROC_LOCK(p);
483		p->p_sigacts = newsigacts;
484	} else
485		oldsigacts = NULL;
486
487	/* Stop profiling */
488	stopprofclock(p);
489
490	/* reset caught signals */
491	execsigs(p);
492
493	/* name this process - nameiexec(p, ndp) */
494	len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
495	bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
496	p->p_comm[len] = 0;
497
498	/*
499	 * Mark as execed, wake up the process that vforked (if any) and tell
500	 * it that it now has its own resources back.
501	 */
502	p->p_flag |= P_EXEC;
503	if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
504		p->p_flag &= ~P_PPWAIT;
505		wakeup(p->p_pptr);
506	}
507
508	/*
509	 * Implement image setuid/setgid.
510	 *
511	 * Don't honor setuid/setgid if the filesystem prohibits it or if
512	 * the process is being traced.
513	 *
514	 * XXXMAC: For the time being, use NOSUID to also prohibit
515	 * transitions on the file system.
516	 */
517	oldcred = p->p_ucred;
518	credential_changing = 0;
519	credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
520	    attr.va_uid;
521	credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
522	    attr.va_gid;
523#ifdef MAC
524	will_transition = mac_execve_will_transition(oldcred, imgp->vp,
525	    interplabel, imgp);
526	credential_changing |= will_transition;
527#endif
528
529	if (credential_changing &&
530	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
531	    (p->p_flag & P_TRACED) == 0) {
532		/*
533		 * Turn off syscall tracing for set-id programs, except for
534		 * root.  Record any set-id flags first to make sure that
535		 * we do not regain any tracing during a possible block.
536		 */
537		setsugid(p);
538#ifdef KTRACE
539		if (p->p_tracevp != NULL && suser_cred(oldcred, PRISON_ROOT)) {
540			mtx_lock(&ktrace_mtx);
541			p->p_traceflag = 0;
542			tracevp = p->p_tracevp;
543			p->p_tracevp = NULL;
544			tracecred = p->p_tracecred;
545			p->p_tracecred = NULL;
546			mtx_unlock(&ktrace_mtx);
547		}
548#endif
549		/*
550		 * Close any file descriptors 0..2 that reference procfs,
551		 * then make sure file descriptors 0..2 are in use.
552		 *
553		 * setugidsafety() may call closef() and then pfind()
554		 * which may grab the process lock.
555		 * fdcheckstd() may call falloc() which may block to
556		 * allocate memory, so temporarily drop the process lock.
557		 */
558		PROC_UNLOCK(p);
559		setugidsafety(td);
560		error = fdcheckstd(td);
561		if (error != 0)
562			goto done1;
563		PROC_LOCK(p);
564		/*
565		 * Set the new credentials.
566		 */
567		crcopy(newcred, oldcred);
568		if (attr.va_mode & VSUID)
569			change_euid(newcred, euip);
570		if (attr.va_mode & VSGID)
571			change_egid(newcred, attr.va_gid);
572#ifdef MAC
573		if (will_transition) {
574			mac_execve_transition(oldcred, newcred, imgp->vp,
575			    interplabel, imgp);
576		}
577#endif
578		/*
579		 * Implement correct POSIX saved-id behavior.
580		 *
581		 * XXXMAC: Note that the current logic will save the
582		 * uid and gid if a MAC domain transition occurs, even
583		 * though maybe it shouldn't.
584		 */
585		change_svuid(newcred, newcred->cr_uid);
586		change_svgid(newcred, newcred->cr_gid);
587		p->p_ucred = newcred;
588		newcred = NULL;
589	} else {
590		if (oldcred->cr_uid == oldcred->cr_ruid &&
591		    oldcred->cr_gid == oldcred->cr_rgid)
592			p->p_flag &= ~P_SUGID;
593		/*
594		 * Implement correct POSIX saved-id behavior.
595		 *
596		 * XXX: It's not clear that the existing behavior is
597		 * POSIX-compliant.  A number of sources indicate that the
598		 * saved uid/gid should only be updated if the new ruid is
599		 * not equal to the old ruid, or the new euid is not equal
600		 * to the old euid and the new euid is not equal to the old
601		 * ruid.  The FreeBSD code always updates the saved uid/gid.
602		 * Also, this code uses the new (replaced) euid and egid as
603		 * the source, which may or may not be the right ones to use.
604		 */
605		if (oldcred->cr_svuid != oldcred->cr_uid ||
606		    oldcred->cr_svgid != oldcred->cr_gid) {
607			crcopy(newcred, oldcred);
608			change_svuid(newcred, newcred->cr_uid);
609			change_svgid(newcred, newcred->cr_gid);
610			p->p_ucred = newcred;
611			newcred = NULL;
612		}
613	}
614
615	/*
616	 * Store the vp for use in procfs.  This vnode was referenced prior
617	 * to locking the proc lock.
618	 */
619	textvp = p->p_textvp;
620	p->p_textvp = ndp->ni_vp;
621
622	/*
623	 * Notify others that we exec'd, and clear the P_INEXEC flag
624	 * as we're now a bona fide freshly-execed process.
625	 */
626	KNOTE(&p->p_klist, NOTE_EXEC);
627	p->p_flag &= ~P_INEXEC;
628
629	/*
630	 * If tracing the process, trap to debugger so breakpoints
631	 * can be set before the program executes.
632	 */
633	if (p->p_flag & P_TRACED)
634		psignal(p, SIGTRAP);
635
636	/* clear "fork but no exec" flag, as we _are_ execing */
637	p->p_acflag &= ~AFORK;
638
639	/* Free any previous argument cache */
640	oldargs = p->p_args;
641	p->p_args = NULL;
642
643	/* Cache arguments if they fit inside our allowance */
644	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
645		bcopy(imgp->stringbase, newargs->ar_args, i);
646		p->p_args = newargs;
647		newargs = NULL;
648	}
649	PROC_UNLOCK(p);
650
651	/* Set values passed into the program in registers. */
652	if (p->p_sysent->sv_setregs)
653		(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
654		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
655	else
656		exec_setregs(td, imgp->entry_addr,
657		    (u_long)(uintptr_t)stack_base, imgp->ps_strings);
658
659done1:
660	/*
661	 * Free any resources malloc'd earlier that we didn't use.
662	 */
663	uifree(euip);
664	if (newcred == NULL)
665		crfree(oldcred);
666	else
667		crfree(newcred);
668	/*
669	 * Handle deferred decrement of ref counts.
670	 */
671	if (textvp != NULL)
672		vrele(textvp);
673	if (ndp->ni_vp && error != 0)
674		vrele(ndp->ni_vp);
675#ifdef KTRACE
676	if (tracevp != NULL)
677		vrele(tracevp);
678	if (tracecred != NULL)
679		crfree(tracecred);
680#endif
681	if (oldargs != NULL)
682		pargs_drop(oldargs);
683	if (newargs != NULL)
684		pargs_drop(newargs);
685	if (oldsigacts != NULL)
686		sigacts_free(oldsigacts);
687
688exec_fail_dealloc:
689
690	/*
691	 * free various allocated resources
692	 */
693	if (imgp->firstpage != NULL)
694		exec_unmap_first_page(imgp);
695
696	if (imgp->vp != NULL) {
697		NDFREE(ndp, NDF_ONLY_PNBUF);
698		vput(imgp->vp);
699	}
700
701	if (imgp->stringbase != NULL)
702		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
703		    ARG_MAX);
704
705	if (imgp->object != NULL)
706		vm_object_deallocate(imgp->object);
707
708	if (error == 0) {
709		/*
710		 * Stop the process here if its stop event mask has
711		 * the S_EXEC bit set.
712		 */
713		STOPEVENT(p, S_EXEC, 0);
714		goto done2;
715	}
716
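	/*
	 * Failure handling: exec_fail_dealloc above has already released
	 * the image resources (first page, vnode, string buffer and VM
	 * object); exec_fail below only needs to clear P_INEXEC.  If the
	 * old vmspace was already destroyed there is no process image
	 * left to return an error to, so the only remaining option is to
	 * exit.
	 */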
717exec_fail:
718	/* we're done here, clear P_INEXEC */
719	PROC_LOCK(p);
720	p->p_flag &= ~P_INEXEC;
721	PROC_UNLOCK(p);
722
723	if (imgp->vmspace_destroyed) {
724		/* sorry, no process image left to return to; exit gracefully */
725#ifdef MAC
726		mac_execve_exit(imgp);
727		if (interplabel != NULL)
728			mac_vnode_label_free(interplabel);
729#endif
730		exit1(td, W_EXITCODE(0, SIGABRT));
731		/* NOT REACHED */
732		error = 0;
733	}
734done2:
735#ifdef MAC
736	mac_execve_exit(imgp);
737	if (interplabel != NULL)
738		mac_vnode_label_free(interplabel);
739#endif
740	mtx_unlock(&Giant);
741	return (error);
742}
743
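/*
 * Map the first page of an executable image.
 *
 * Grab page 0 of the vnode's VM object, paging in up to VM_INITIAL_PAGEIN
 * pages around it when it is not yet valid, hold the page and map it
 * through an sf_buf so that image activators can examine the header via
 * imgp->image_header.  The mapping is released by exec_unmap_first_page().
 */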
744int
745exec_map_first_page(imgp)
746	struct image_params *imgp;
747{
748	int rv, i;
749	int initial_pagein;
750	vm_page_t ma[VM_INITIAL_PAGEIN];
751	vm_object_t object;
752
753	GIANT_REQUIRED;
754
755	if (imgp->firstpage != NULL)
756		exec_unmap_first_page(imgp);
757
758	VOP_GETVOBJECT(imgp->vp, &object);
759	VM_OBJECT_LOCK(object);
760	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
761	if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
762		initial_pagein = VM_INITIAL_PAGEIN;
763		if (initial_pagein > object->size)
764			initial_pagein = object->size;
765		for (i = 1; i < initial_pagein; i++) {
766			if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
767				if (ma[i]->valid)
768					break;
769				vm_page_lock_queues();
770				if ((ma[i]->flags & PG_BUSY) || ma[i]->busy) {
771					vm_page_unlock_queues();
772					break;
773				}
774				vm_page_busy(ma[i]);
775				vm_page_unlock_queues();
776			} else {
777				ma[i] = vm_page_alloc(object, i,
778				    VM_ALLOC_NORMAL);
779				if (ma[i] == NULL)
780					break;
781			}
782		}
783		initial_pagein = i;
784		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
785		ma[0] = vm_page_lookup(object, 0);
786		if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
787		    (ma[0]->valid == 0)) {
788			if (ma[0]) {
789				vm_page_lock_queues();
790				pmap_remove_all(ma[0]);
791				vm_page_free(ma[0]);
792				vm_page_unlock_queues();
793			}
794			VM_OBJECT_UNLOCK(object);
795			return (EIO);
796		}
797	}
798	vm_page_lock_queues();
799	vm_page_hold(ma[0]);
800	vm_page_wakeup(ma[0]);
801	vm_page_unlock_queues();
802	VM_OBJECT_UNLOCK(object);
803
804	imgp->firstpage = sf_buf_alloc(ma[0], 0);
805	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
806
807	return (0);
808}
809
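/*
 * Release the header page set up by exec_map_first_page(): free the
 * sf_buf mapping and drop the hold reference on the page.
 */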
810void
811exec_unmap_first_page(imgp)
812	struct image_params *imgp;
813{
814	vm_page_t m;
815
816	if (imgp->firstpage != NULL) {
817		m = sf_buf_page(imgp->firstpage);
818		sf_buf_free(imgp->firstpage);
819		imgp->firstpage = NULL;
820		vm_page_lock_queues();
821		vm_page_unhold(m);
822		vm_page_unlock_queues();
823	}
824}
825
826/*
827 * Destroy old address space, and allocate a new stack
828 *	The new stack is only SGROWSIZ large because it is grown
829 *	automatically in trap.c.
830 */
831int
832exec_new_vmspace(imgp, sv)
833	struct image_params *imgp;
834	struct sysentvec *sv;
835{
836	int error;
837	struct proc *p = imgp->proc;
838	struct vmspace *vmspace = p->p_vmspace;
839	vm_offset_t stack_addr;
840	vm_map_t map;
841
842	GIANT_REQUIRED;
843
844	imgp->vmspace_destroyed = 1;
845
846	/* Called with Giant held, do not depend on it! */
847	EVENTHANDLER_INVOKE(process_exec, p);
848
849	/*
850	 * Here is as good a place as any to do any resource limit cleanups.
851	 * This is needed if a 64-bit binary execs a 32-bit binary - the
852	 * data size limit may need to be changed to a value that makes
853	 * sense for the 32-bit binary.
854	 */
855	if (sv->sv_fixlimits != NULL)
856		sv->sv_fixlimits(imgp);
857
858	/*
859	 * Blow away the entire process VM if the address space is not
860	 * shared; otherwise, create a new VM space so that other threads
861	 * are not disrupted.
862	 */
863	map = &vmspace->vm_map;
864	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
865	    vm_map_max(map) == sv->sv_maxuser) {
866		shmexit(vmspace);
867		pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
868		    vm_map_max(map));
869		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
870	} else {
871		vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
872		vmspace = p->p_vmspace;
873		map = &vmspace->vm_map;
874	}
875
876	/* Allocate a new stack */
877	stack_addr = sv->sv_usrstack - maxssiz;
878	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
879	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
880	if (error)
881		return (error);
882
883#ifdef __ia64__
884	/* Allocate a new register stack */
885	stack_addr = IA64_BACKINGSTORE;
886	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
887	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
888	if (error)
889		return (error);
890#endif
891
892	/* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
893	 * VM_STACK case, but they are still used to monitor the size of the
894	 * process stack so we can check the stack rlimit.
895	 */
896	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
897	vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;
898
899	return (0);
900}
901
902/*
903 * Copy out argument and environment strings from the old process
904 *	address space into the temporary string buffer.
905 */
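/*
 * Note that copyinstr() returns ENAMETOOLONG once the ARG_MAX string
 * buffer is exhausted; that is translated into E2BIG below so callers
 * see the usual "argument list too long" error.
 */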
906int
907exec_extract_strings(imgp)
908	struct image_params *imgp;
909{
910	char	**argv, **envv;
911	char	*argp, *envp;
912	int	error;
913	size_t	length;
914
915	/*
916	 * extract arguments first
917	 */
918
919	argv = imgp->userspace_argv;
920
921	if (argv) {
922		argp = (caddr_t)(intptr_t)fuword(argv);
923		if (argp == (caddr_t)-1)
924			return (EFAULT);
925		if (argp)
926			argv++;
927		if (imgp->argv0)
928			argp = imgp->argv0;
929		if (argp) {
930			do {
931				if (argp == (caddr_t)-1)
932					return (EFAULT);
933				if ((error = copyinstr(argp, imgp->stringp,
934				    imgp->stringspace, &length))) {
935					if (error == ENAMETOOLONG)
936						return (E2BIG);
937					return (error);
938				}
939				imgp->stringspace -= length;
940				imgp->stringp += length;
941				imgp->argc++;
942			} while ((argp = (caddr_t)(intptr_t)fuword(argv++)));
943		}
944	} else
945		return (EFAULT);
946
947	imgp->endargs = imgp->stringp;
948
949	/*
950	 * extract environment strings
951	 */
952
953	envv = imgp->userspace_envv;
954
955	if (envv) {
956		while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
957			if (envp == (caddr_t)-1)
958				return (EFAULT);
959			if ((error = copyinstr(envp, imgp->stringp,
960			    imgp->stringspace, &length))) {
961				if (error == ENAMETOOLONG)
962					return (E2BIG);
963				return (error);
964			}
965			imgp->stringspace -= length;
966			imgp->stringp += length;
967			imgp->envc++;
968		}
969	}
970
971	return (0);
972}
973
974/*
975 * Copy strings out to the new process address space, constructing
976 *	new arg and env vector tables. Return a pointer to the base
977 *	so that it can be used as the initial stack pointer.
978 */
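/*
 * Roughly, the layout built at the top of the new user stack is, from
 * high addresses to low:
 *
 *	struct ps_strings			(at sv_psstrings)
 *	signal trampoline code			(szsigcode bytes)
 *	SPARE_USRSPACE
 *	argument and environment strings	(bottom at destp)
 *	spare pointer slots for the ELF auxargs, if any
 *	NULL
 *	envp[0 .. envc-1]
 *	NULL
 *	argv[0 .. argc-1]			(vectp, the returned stack_base)
 */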
979register_t *
980exec_copyout_strings(imgp)
981	struct image_params *imgp;
982{
983	int argc, envc;
984	char **vectp;
985	char *stringp, *destp;
986	register_t *stack_base;
987	struct ps_strings *arginfo;
988	struct proc *p;
989	int szsigcode;
990
991	/*
992	 * Calculate string base and vector table pointers.
993	 * Also deal with signal trampoline code for this exec type.
994	 */
995	p = imgp->proc;
996	szsigcode = 0;
997	arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
998	if (p->p_sysent->sv_szsigcode != NULL)
999		szsigcode = *(p->p_sysent->sv_szsigcode);
1000	destp =	(caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
1001	    roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
1002
1003	/*
1004	 * install sigcode
1005	 */
1006	if (szsigcode)
1007		copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
1008		    szsigcode), szsigcode);
1009
1010	/*
1011	 * If we have a valid auxargs ptr, prepare some room
1012	 * on the stack.
1013	 */
1014	if (imgp->auxargs) {
1015		/*
1016		 * 'AT_COUNT*2' is the size of the ELF Auxargs data: up to AT_COUNT
1017		 * two-word (id, value) entries, kept for backward compatibility.
1018		 */
1019		imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
1020		    (AT_COUNT * 2);
1021		/*
1022		 * The '+ 2' is for the null pointers at the end of each of
1023		 * the arg and env vector sets, and imgp->auxarg_size is room
1024		 * for the auxiliary arguments passed to the runtime loader.
1025		 */
1026		vectp = (char **)(destp - (imgp->argc + imgp->envc + 2 +
1027		    imgp->auxarg_size) * sizeof(char *));
1028
1029	} else
1030		/*
1031		 * The '+ 2' is for the null pointers at the end of each of
1032		 * the arg and env vector sets
1033		 */
1034		vectp = (char **)(destp - (imgp->argc + imgp->envc + 2) *
1035		    sizeof(char *));
1036
1037	/*
1038	 * vectp also becomes our initial stack base
1039	 */
1040	stack_base = (register_t *)vectp;
1041
1042	stringp = imgp->stringbase;
1043	argc = imgp->argc;
1044	envc = imgp->envc;
1045
1046	/*
1047	 * Copy out strings - arguments and environment.
1048	 */
1049	copyout(stringp, destp, ARG_MAX - imgp->stringspace);
1050
1051	/*
1052	 * Fill in "ps_strings" struct for ps, w, etc.
1053	 */
1054	suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
1055	suword(&arginfo->ps_nargvstr, argc);
1056
1057	/*
1058	 * Fill in argument portion of vector table.
1059	 */
1060	for (; argc > 0; --argc) {
1061		suword(vectp++, (long)(intptr_t)destp);
1062		while (*stringp++ != 0)
1063			destp++;
1064		destp++;
1065	}
1066
1067	/* a null vector table pointer separates the argp's from the envp's */
1068	suword(vectp++, 0);
1069
1070	suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
1071	suword(&arginfo->ps_nenvstr, envc);
1072
1073	/*
1074	 * Fill in environment portion of vector table.
1075	 */
1076	for (; envc > 0; --envc) {
1077		suword(vectp++, (long)(intptr_t)destp);
1078		while (*stringp++ != 0)
1079			destp++;
1080		destp++;
1081	}
1082
1083	/* end of vector table is a null pointer */
1084	suword(vectp, 0);
1085
1086	return (stack_base);
1087}
1088
1089/*
1090 * Check permissions of file to execute.
1091 *	Called with imgp->vp locked.
1092 *	Return 0 for success or error code on failure.
1093 */
1094int
1095exec_check_permissions(imgp)
1096	struct image_params *imgp;
1097{
1098	struct vnode *vp = imgp->vp;
1099	struct vattr *attr = imgp->attr;
1100	struct thread *td;
1101	int error;
1102
1103	td = curthread;			/* XXXKSE */
1104
1105	/* Get file attributes */
1106	error = VOP_GETATTR(vp, attr, td->td_ucred, td);
1107	if (error)
1108		return (error);
1109
1110#ifdef MAC
1111	error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
1112	if (error)
1113		return (error);
1114#endif
1115
1116	/*
1117	 * 1) Check if file execution is disabled for the filesystem that this
1118	 *	file resides on.
1119	 * 2) Ensure that at least one execute bit is set - otherwise root
1120	 *	would always succeed, and we don't want that to happen unless
1121	 *	the file really is executable.
1122	 * 3) Ensure that the file is a regular file.
1123	 */
1124	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1125	    ((attr->va_mode & 0111) == 0) ||
1126	    (attr->va_type != VREG))
1127		return (EACCES);
1128
1129	/*
1130	 * Zero length files can't be exec'd
1131	 */
1132	if (attr->va_size == 0)
1133		return (ENOEXEC);
1134
1135	/*
1136	 *  Check for execute permission to file based on current credentials.
1137	 */
1138	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1139	if (error)
1140		return (error);
1141
1142	/*
1143	 * Check number of open-for-writes on the file and deny execution
1144	 * if there are any.
1145	 */
1146	if (vp->v_writecount)
1147		return (ETXTBSY);
1148
1149	/*
1150	 * Call filesystem specific open routine (which does nothing in the
1151	 * general case).
1152	 */
1153	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
1154	return (error);
1155}
1156
1157/*
1158 * Exec handler registration
1159 */
1160int
1161exec_register(execsw_arg)
1162	const struct execsw *execsw_arg;
1163{
1164	const struct execsw **es, **xs, **newexecsw;
1165	int count = 2;	/* New slot and trailing NULL */
1166
1167	if (execsw)
1168		for (es = execsw; *es; es++)
1169			count++;
1170	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1171	if (newexecsw == NULL)
1172		return (ENOMEM);
1173	xs = newexecsw;
1174	if (execsw)
1175		for (es = execsw; *es; es++)
1176			*xs++ = *es;
1177	*xs++ = execsw_arg;
1178	*xs = NULL;
1179	if (execsw)
1180		free(execsw, M_TEMP);
1181	execsw = newexecsw;
1182	return (0);
1183}
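/*
 * For illustration only (not compiled), an image activator would
 * typically supply a struct execsw and register it, either directly as
 * sketched below or through the EXEC_SET() convenience macro from
 * <sys/imgact.h>.  The names example_imgact/example_execsw are
 * hypothetical; the activator here simply declines every image.
 */
#if 0
static int
example_imgact(struct image_params *imgp)
{

	/* Return -1 to tell the loop in kern_execve() "not my format". */
	return (-1);
}

static const struct execsw example_execsw = {
	example_imgact,
	"example"
};

static void
example_register(void)
{

	(void)exec_register(&example_execsw);
}
#endif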
1184
1185int
1186exec_unregister(execsw_arg)
1187	const struct execsw *execsw_arg;
1188{
1189	const struct execsw **es, **xs, **newexecsw;
1190	int count = 1;
1191
1192	if (execsw == NULL)
1193		panic("unregister with no handlers left?\n");
1194
1195	for (es = execsw; *es; es++) {
1196		if (*es == execsw_arg)
1197			break;
1198	}
1199	if (*es == NULL)
1200		return (ENOENT);
1201	for (es = execsw; *es; es++)
1202		if (*es != execsw_arg)
1203			count++;
1204	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1205	if (newexecsw == NULL)
1206		return (ENOMEM);
1207	xs = newexecsw;
1208	for (es = execsw; *es; es++)
1209		if (*es != execsw_arg)
1210			*xs++ = *es;
1211	*xs = NULL;
1212	if (execsw)
1213		free(execsw, M_TEMP);
1214	execsw = newexecsw;
1215	return (0);
1216}
1217