kern_sharedpage.c (237474) vs. kern_sharedpage.c (237477)

Deleted (237474; the header inherited from kern_exec.c, as the $FreeBSD$ id below still shows):

1/*-
2 * Copyright (c) 1993, David Greenman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright

--- 9 unchanged lines hidden ---

20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/kern_exec.c 237474 2012-06-23 09:33:06Z kib $");
29

Added (237477; the new kern_sharedpage.c header):

1/*-
2 * Copyright (c) 2010, 2012 Konstantin Belousov <kib@FreeBSD.org>
3-25 (the same BSD license text and disclaimer as in the block above)
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/kern_sharedpage.c 237477 2012-06-23 10:15:23Z kib $");
29
Deleted (237474; the kern_exec.c option headers and includes, most of which the new file does not need):

30#include "opt_capsicum.h"
31#include "opt_compat.h"
32#include "opt_hwpmc_hooks.h"
33#include "opt_kdtrace.h"
34#include "opt_ktrace.h"
35#include "opt_vm.h"
36
37#include <sys/param.h>
38#include <sys/capability.h>
39#include <sys/systm.h>
40#include <sys/capability.h>
41#include <sys/eventhandler.h>
42#include <sys/lock.h>
43#include <sys/mutex.h>
44#include <sys/sysproto.h>
45#include <sys/signalvar.h>
46#include <sys/kernel.h>
47#include <sys/mount.h>
48#include <sys/filedesc.h>
49#include <sys/fcntl.h>
50#include <sys/acct.h>
51#include <sys/exec.h>
52#include <sys/imgact.h>
53#include <sys/imgact_elf.h>
54#include <sys/wait.h>
55#include <sys/malloc.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/pioctl.h>
59#include <sys/namei.h>
60#include <sys/resourcevar.h>
61#include <sys/sched.h>
62#include <sys/sdt.h>
63#include <sys/sf_buf.h>
64#include <sys/syscallsubr.h>
65#include <sys/sysent.h>
66#include <sys/shm.h>
67#include <sys/sysctl.h>
68#include <sys/vdso.h>
69#include <sys/vnode.h>
70#include <sys/stat.h>
71#ifdef KTRACE
72#include <sys/ktrace.h>
73#endif
74
75#include <vm/vm.h>
76#include <vm/vm_param.h>
77#include <vm/pmap.h>
78#include <vm/vm_page.h>
79#include <vm/vm_map.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_extern.h>
82#include <vm/vm_object.h>
83#include <vm/vm_pager.h>
84

Added (237477; the much shorter include list of the new file):

30#include "opt_compat.h"
31#include "opt_vm.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/sysent.h>
39#include <sys/sysctl.h>
40#include <sys/vdso.h>
41
42#include <vm/vm.h>
43#include <vm/vm_param.h>
44#include <vm/pmap.h>
45#include <vm/vm_extern.h>
46#include <vm/vm_kern.h>
47#include <vm/vm_map.h>
48#include <vm/vm_object.h>
49#include <vm/vm_page.h>
50#include <vm/vm_pager.h>
51

Deleted (237474), continued; the rest of the old kern_exec.c body follows:
85#ifdef HWPMC_HOOKS
86#include <sys/pmckern.h>
87#endif
88
89#include <machine/reg.h>
90
91#include <security/audit/audit.h>
92#include <security/mac/mac_framework.h>
93
94#ifdef KDTRACE_HOOKS
95#include <sys/dtrace_bsd.h>
96dtrace_execexit_func_t dtrace_fasttrap_exec;
97#endif
98
99SDT_PROVIDER_DECLARE(proc);
100SDT_PROBE_DEFINE(proc, kernel, , exec, exec);
101SDT_PROBE_ARGTYPE(proc, kernel, , exec, 0, "char *");
102SDT_PROBE_DEFINE(proc, kernel, , exec_failure, exec-failure);
103SDT_PROBE_ARGTYPE(proc, kernel, , exec_failure, 0, "int");
104SDT_PROBE_DEFINE(proc, kernel, , exec_success, exec-success);
105SDT_PROBE_ARGTYPE(proc, kernel, , exec_success, 0, "char *");
106
107MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
108
109static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
110static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
111static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
112static int do_execve(struct thread *td, struct image_args *args,
113 struct mac *mac_p);
114
115/* XXX This should be vm_size_t. */
116SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
117 NULL, 0, sysctl_kern_ps_strings, "LU", "");
118
119/* XXX This should be vm_size_t. */
120SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
121 CTLFLAG_CAPRD, NULL, 0, sysctl_kern_usrstack, "LU", "");
122
123SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
124 NULL, 0, sysctl_kern_stackprot, "I", "");
125
126u_long ps_arg_cache_limit = PAGE_SIZE / 16;
127SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
128 &ps_arg_cache_limit, 0, "");
129
130static int map_at_zero = 0;
131TUNABLE_INT("security.bsd.map_at_zero", &map_at_zero);
132SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RW, &map_at_zero, 0,
133 "Permit processes to map an object at virtual address 0.");
134
135static int
136sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
137{
138 struct proc *p;
139 int error;
140
141 p = curproc;
142#ifdef SCTL_MASK32
143 if (req->flags & SCTL_MASK32) {
144 unsigned int val;
145 val = (unsigned int)p->p_sysent->sv_psstrings;
146 error = SYSCTL_OUT(req, &val, sizeof(val));
147 } else
148#endif
149 error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
150 sizeof(p->p_sysent->sv_psstrings));
151 return error;
152}
153
154static int
155sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
156{
157 struct proc *p;
158 int error;
159
160 p = curproc;
161#ifdef SCTL_MASK32
162 if (req->flags & SCTL_MASK32) {
163 unsigned int val;
164 val = (unsigned int)p->p_sysent->sv_usrstack;
165 error = SYSCTL_OUT(req, &val, sizeof(val));
166 } else
167#endif
168 error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
169 sizeof(p->p_sysent->sv_usrstack));
170 return error;
171}
172
173static int
174sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
175{
176 struct proc *p;
177
178 p = curproc;
179 return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
180 sizeof(p->p_sysent->sv_stackprot)));
181}
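/*
 * For illustration: the three read-only sysctls above export per-ABI
 * constants taken from the current process' sysentvec.  A minimal
 * userland sketch (a separate program, not part of this kernel file)
 * that queries them through sysctlbyname(3) could look like this; the
 * output format is arbitrary.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	unsigned long psstrings, usrstack;
	int stackprot;
	size_t len;

	len = sizeof(psstrings);
	if (sysctlbyname("kern.ps_strings", &psstrings, &len, NULL, 0) != 0)
		return (1);
	len = sizeof(usrstack);
	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) != 0)
		return (1);
	len = sizeof(stackprot);
	if (sysctlbyname("kern.stackprot", &stackprot, &len, NULL, 0) != 0)
		return (1);
	printf("ps_strings %#lx usrstack %#lx stackprot %#x\n",
	    psstrings, usrstack, (unsigned)stackprot);
	return (0);
}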
182
183/*
184 * Each of the items is a pointer to a `const struct execsw', hence the
185 * double pointer here.
186 */
187static const struct execsw **execsw;
188
189#ifndef _SYS_SYSPROTO_H_
190struct execve_args {
191 char *fname;
192 char **argv;
193 char **envv;
194};
195#endif
196
197int
198sys_execve(td, uap)
199 struct thread *td;
200 struct execve_args /* {
201 char *fname;
202 char **argv;
203 char **envv;
204 } */ *uap;
205{
206 int error;
207 struct image_args args;
208
209 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
210 uap->argv, uap->envv);
211 if (error == 0)
212 error = kern_execve(td, &args, NULL);
213 return (error);
214}
215
216#ifndef _SYS_SYSPROTO_H_
217struct fexecve_args {
218 int fd;
219 char **argv;
220 char **envv;
221}
222#endif
223int
224sys_fexecve(struct thread *td, struct fexecve_args *uap)
225{
226 int error;
227 struct image_args args;
228
229 error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
230 uap->argv, uap->envv);
231 if (error == 0) {
232 args.fd = uap->fd;
233 error = kern_execve(td, &args, NULL);
234 }
235 return (error);
236}
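/*
 * For illustration: sys_fexecve() above is reached from userland through
 * fexecve(2).  A minimal sketch (a separate userland program, not part of
 * this kernel file), assuming an executable such as /bin/echo exists; in
 * capability mode the descriptor would also need the CAP_FEXECVE right.
 */
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	char *argv[] = { "echo", "hello", NULL };
	char *envv[] = { NULL };
	int fd;

	fd = open("/bin/echo", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	fexecve(fd, argv, envv);
	err(1, "fexecve");		/* fexecve() only returns on error */
}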
237
238#ifndef _SYS_SYSPROTO_H_
239struct __mac_execve_args {
240 char *fname;
241 char **argv;
242 char **envv;
243 struct mac *mac_p;
244};
245#endif
246
247int
248sys___mac_execve(td, uap)
249 struct thread *td;
250 struct __mac_execve_args /* {
251 char *fname;
252 char **argv;
253 char **envv;
254 struct mac *mac_p;
255 } */ *uap;
256{
257#ifdef MAC
258 int error;
259 struct image_args args;
260
261 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
262 uap->argv, uap->envv);
263 if (error == 0)
264 error = kern_execve(td, &args, uap->mac_p);
265 return (error);
266#else
267 return (ENOSYS);
268#endif
269}
270
271/*
272 * XXX: kern_execve has the astonishing property of not always returning to
273 * the caller. If sufficiently bad things happen during the call to
274 * do_execve(), it can end up calling exit1(); as a result, callers must
275 * avoid doing anything which they might need to undo (e.g., allocating
276 * memory).
277 */
278int
279kern_execve(td, args, mac_p)
280 struct thread *td;
281 struct image_args *args;
282 struct mac *mac_p;
283{
284 struct proc *p = td->td_proc;
285 int error;
286
287 AUDIT_ARG_ARGV(args->begin_argv, args->argc,
288 args->begin_envv - args->begin_argv);
289 AUDIT_ARG_ENVV(args->begin_envv, args->envc,
290 args->endp - args->begin_envv);
291 if (p->p_flag & P_HADTHREADS) {
292 PROC_LOCK(p);
293 if (thread_single(SINGLE_BOUNDARY)) {
294 PROC_UNLOCK(p);
295 exec_free_args(args);
296 return (ERESTART); /* Try again later. */
297 }
298 PROC_UNLOCK(p);
299 }
300
301 error = do_execve(td, args, mac_p);
302
303 if (p->p_flag & P_HADTHREADS) {
304 PROC_LOCK(p);
305 /*
306 * If success, we upgrade to SINGLE_EXIT state to
307 * force other threads to suicide.
308 */
309 if (error == 0)
310 thread_single(SINGLE_EXIT);
311 else
312 thread_single_end();
313 PROC_UNLOCK(p);
314 }
315
316 return (error);
317}
318
319/*
320 * In-kernel implementation of execve(). All arguments are assumed to be
321 * userspace pointers from the passed thread.
322 */
323static int
324do_execve(td, args, mac_p)
325 struct thread *td;
326 struct image_args *args;
327 struct mac *mac_p;
328{
329 struct proc *p = td->td_proc;
330 struct nameidata nd;
331 struct ucred *newcred = NULL, *oldcred;
332 struct uidinfo *euip;
333 register_t *stack_base;
334 int error, i;
335 struct image_params image_params, *imgp;
336 struct vattr attr;
337 int (*img_first)(struct image_params *);
338 struct pargs *oldargs = NULL, *newargs = NULL;
339 struct sigacts *oldsigacts, *newsigacts;
340#ifdef KTRACE
341 struct vnode *tracevp = NULL;
342 struct ucred *tracecred = NULL;
343#endif
344 struct vnode *textvp = NULL, *binvp = NULL;
345 int credential_changing;
346 int vfslocked;
347 int textset;
348#ifdef MAC
349 struct label *interpvplabel = NULL;
350 int will_transition;
351#endif
352#ifdef HWPMC_HOOKS
353 struct pmckern_procexec pe;
354#endif
355 static const char fexecv_proc_title[] = "(fexecv)";
356
357 vfslocked = 0;
358 imgp = &image_params;
359
360 /*
361 * Lock the process and set the P_INEXEC flag to indicate that
362 * it should be left alone until we're done here. This is
363 * necessary to avoid race conditions - e.g. in ptrace() -
364 * that might allow a local user to illicitly obtain elevated
365 * privileges.
366 */
367 PROC_LOCK(p);
368 KASSERT((p->p_flag & P_INEXEC) == 0,
369 ("%s(): process already has P_INEXEC flag", __func__));
370 p->p_flag |= P_INEXEC;
371 PROC_UNLOCK(p);
372
373 /*
374 * Initialize part of the common data
375 */
376 imgp->proc = p;
377 imgp->execlabel = NULL;
378 imgp->attr = &attr;
379 imgp->entry_addr = 0;
380 imgp->reloc_base = 0;
381 imgp->vmspace_destroyed = 0;
382 imgp->interpreted = 0;
383 imgp->opened = 0;
384 imgp->interpreter_name = NULL;
385 imgp->auxargs = NULL;
386 imgp->vp = NULL;
387 imgp->object = NULL;
388 imgp->firstpage = NULL;
389 imgp->ps_strings = 0;
390 imgp->auxarg_size = 0;
391 imgp->args = args;
392 imgp->execpath = imgp->freepath = NULL;
393 imgp->execpathp = 0;
394 imgp->canary = 0;
395 imgp->canarylen = 0;
396 imgp->pagesizes = 0;
397 imgp->pagesizeslen = 0;
398 imgp->stack_prot = 0;
399
400#ifdef MAC
401 error = mac_execve_enter(imgp, mac_p);
402 if (error)
403 goto exec_fail;
404#endif
405
406 imgp->image_header = NULL;
407
408 /*
409 * Translate the file name. namei() returns a vnode pointer
410 * in ni_vp among other things.
411 *
412 * XXXAUDIT: It would be desirable to also audit the name of the
413 * interpreter if this is an interpreted binary.
414 */
415 if (args->fname != NULL) {
416 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME
417 | MPSAFE | AUDITVNODE1, UIO_SYSSPACE, args->fname, td);
418 }
419
420 SDT_PROBE(proc, kernel, , exec, args->fname, 0, 0, 0, 0 );
421
422interpret:
423 if (args->fname != NULL) {
424#ifdef CAPABILITY_MODE
425 /*
426 * While capability mode can't reach this point via direct
427 * path arguments to execve(), we also don't allow
428 * interpreters to be used in capability mode (for now).
429 * Catch indirect lookups and return a permissions error.
430 */
431 if (IN_CAPABILITY_MODE(td)) {
432 error = ECAPMODE;
433 goto exec_fail;
434 }
435#endif
436 error = namei(&nd);
437 if (error)
438 goto exec_fail;
439
440 vfslocked = NDHASGIANT(&nd);
441 binvp = nd.ni_vp;
442 imgp->vp = binvp;
443 } else {
444 AUDIT_ARG_FD(args->fd);
445 /*
446 * Some might argue that CAP_READ and/or CAP_MMAP should also
447 * be required here; such arguments will be entertained.
448 */
449 error = fgetvp_read(td, args->fd, CAP_FEXECVE, &binvp);
450 if (error)
451 goto exec_fail;
452 vfslocked = VFS_LOCK_GIANT(binvp->v_mount);
453 vn_lock(binvp, LK_EXCLUSIVE | LK_RETRY);
454 AUDIT_ARG_VNODE1(binvp);
455 imgp->vp = binvp;
456 }
457
458 /*
459 * Check file permissions (also 'opens' file)
460 */
461 error = exec_check_permissions(imgp);
462 if (error)
463 goto exec_fail_dealloc;
464
465 imgp->object = imgp->vp->v_object;
466 if (imgp->object != NULL)
467 vm_object_reference(imgp->object);
468
469 /*
470 * Set VV_TEXT now so no one can write to the executable while we're
471 * activating it.
472 *
473 * Remember if this was set before and unset it in case this is not
474 * actually an executable image.
475 */
476 textset = imgp->vp->v_vflag & VV_TEXT;
477 ASSERT_VOP_ELOCKED(imgp->vp, "vv_text");
478 imgp->vp->v_vflag |= VV_TEXT;
479
480 error = exec_map_first_page(imgp);
481 if (error)
482 goto exec_fail_dealloc;
483
484 imgp->proc->p_osrel = 0;
485 /*
486 * If the current process has a special image activator it
487 * wants to try first, call it. For example, emulating shell
488 * scripts differently.
489 */
490 error = -1;
491 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
492 error = img_first(imgp);
493
494 /*
495 * Loop through the list of image activators, calling each one.
496 * An activator returns -1 if there is no match, 0 on success,
497 * and an error otherwise.
498 */
499 for (i = 0; error == -1 && execsw[i]; ++i) {
500 if (execsw[i]->ex_imgact == NULL ||
501 execsw[i]->ex_imgact == img_first) {
502 continue;
503 }
504 error = (*execsw[i]->ex_imgact)(imgp);
505 }
506
507 if (error) {
508 if (error == -1) {
509 if (textset == 0) {
510 ASSERT_VOP_ELOCKED(imgp->vp, "vv_text");
511 imgp->vp->v_vflag &= ~VV_TEXT;
512 }
513 error = ENOEXEC;
514 }
515 goto exec_fail_dealloc;
516 }
517
518 /*
519 * Special interpreter operation, cleanup and loop up to try to
520 * activate the interpreter.
521 */
522 if (imgp->interpreted) {
523 exec_unmap_first_page(imgp);
524 /*
525 * VV_TEXT needs to be unset for scripts. There is a short
526 * period before we determine that something is a script where
527 * VV_TEXT will be set. The vnode lock is held over this
528 * entire period so nothing should illegitimately be blocked.
529 */
530 imgp->vp->v_vflag &= ~VV_TEXT;
531 /* free name buffer and old vnode */
532 if (args->fname != NULL)
533 NDFREE(&nd, NDF_ONLY_PNBUF);
534#ifdef MAC
535 mac_execve_interpreter_enter(binvp, &interpvplabel);
536#endif
537 if (imgp->opened) {
538 VOP_CLOSE(binvp, FREAD, td->td_ucred, td);
539 imgp->opened = 0;
540 }
541 vput(binvp);
542 vm_object_deallocate(imgp->object);
543 imgp->object = NULL;
544 VFS_UNLOCK_GIANT(vfslocked);
545 vfslocked = 0;
546 /* set new name to that of the interpreter */
547 NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE,
548 UIO_SYSSPACE, imgp->interpreter_name, td);
549 args->fname = imgp->interpreter_name;
550 goto interpret;
551 }
552
553 /*
554 * NB: We unlock the vnode here because it is believed that none
555 * of the sv_copyout_strings/sv_fixup operations require the vnode.
556 */
557 VOP_UNLOCK(imgp->vp, 0);
558
559 /*
560 * Do the best to calculate the full path to the image file.
561 */
562 if (imgp->auxargs != NULL &&
563 ((args->fname != NULL && args->fname[0] == '/') ||
564 vn_fullpath(td, imgp->vp, &imgp->execpath, &imgp->freepath) != 0))
565 imgp->execpath = args->fname;
566
567 /*
568 * Copy out strings (args and env) and initialize stack base
569 */
570 if (p->p_sysent->sv_copyout_strings)
571 stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
572 else
573 stack_base = exec_copyout_strings(imgp);
574
575 /*
576 * If custom stack fixup routine present for this process
577 * let it do the stack setup.
578 * Else stuff argument count as first item on stack
579 */
580 if (p->p_sysent->sv_fixup != NULL)
581 (*p->p_sysent->sv_fixup)(&stack_base, imgp);
582 else
583 suword(--stack_base, imgp->args->argc);
584
585 /*
586 * For security and other reasons, the file descriptor table cannot
587 * be shared after an exec.
588 */
589 fdunshare(p, td);
590
591 /*
592 * Malloc things before we need locks.
593 */
594 newcred = crget();
595 euip = uifind(attr.va_uid);
596 i = imgp->args->begin_envv - imgp->args->begin_argv;
597 /* Cache arguments if they fit inside our allowance */
598 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
599 newargs = pargs_alloc(i);
600 bcopy(imgp->args->begin_argv, newargs->ar_args, i);
601 }
602
603 /* close files on exec */
604 fdcloseexec(td);
605 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
606
607 /* Get a reference to the vnode prior to locking the proc */
608 VREF(binvp);
609
610 /*
611 * For security and other reasons, signal handlers cannot
612 * be shared after an exec. The new process gets a copy of the old
613 * handlers. In execsigs(), the new process will have its signals
614 * reset.
615 */
616 PROC_LOCK(p);
617 oldcred = crcopysafe(p, newcred);
618 if (sigacts_shared(p->p_sigacts)) {
619 oldsigacts = p->p_sigacts;
620 PROC_UNLOCK(p);
621 newsigacts = sigacts_alloc();
622 sigacts_copy(newsigacts, oldsigacts);
623 PROC_LOCK(p);
624 p->p_sigacts = newsigacts;
625 } else
626 oldsigacts = NULL;
627
628 /* Stop profiling */
629 stopprofclock(p);
630
631 /* reset caught signals */
632 execsigs(p);
633
634 /* name this process - nameiexec(p, ndp) */
635 bzero(p->p_comm, sizeof(p->p_comm));
636 if (args->fname)
637 bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
638 min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
639 else if (vn_commname(binvp, p->p_comm, sizeof(p->p_comm)) != 0)
640 bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
641 bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
642#ifdef KTR
643 sched_clear_tdname(td);
644#endif
645
646 /*
647 * mark as execed, wakeup the process that vforked (if any) and tell
648 * it that it now has its own resources back
649 */
650 p->p_flag |= P_EXEC;
651 if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
652 p->p_flag &= ~P_PPWAIT;
653 cv_broadcast(&p->p_pwait);
654 }
655
656 /*
657 * Implement image setuid/setgid.
658 *
659 * Don't honor setuid/setgid if the filesystem prohibits it or if
660 * the process is being traced.
661 *
662 * We disable setuid/setgid/etc in compatibility mode on the basis
663 * that most setugid applications are not written with that
664 * environment in mind, and will therefore almost certainly operate
665 * incorrectly. In principle there's no reason that setugid
666 * applications might not be useful in capability mode, so we may want
667 * to reconsider this conservative design choice in the future.
668 *
669 * XXXMAC: For the time being, use NOSUID to also prohibit
670 * transitions on the file system.
671 */
672 credential_changing = 0;
673 credential_changing |= (attr.va_mode & S_ISUID) && oldcred->cr_uid !=
674 attr.va_uid;
675 credential_changing |= (attr.va_mode & S_ISGID) && oldcred->cr_gid !=
676 attr.va_gid;
677#ifdef MAC
678 will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
679 interpvplabel, imgp);
680 credential_changing |= will_transition;
681#endif
682
683 if (credential_changing &&
684#ifdef CAPABILITY_MODE
685 ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
686#endif
687 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
688 (p->p_flag & P_TRACED) == 0) {
689 /*
690 * Turn off syscall tracing for set-id programs, except for
691 * root. Record any set-id flags first to make sure that
692 * we do not regain any tracing during a possible block.
693 */
694 setsugid(p);
695
696#ifdef KTRACE
697 if (priv_check_cred(oldcred, PRIV_DEBUG_DIFFCRED, 0))
698 ktrprocexec(p, &tracecred, &tracevp);
699#endif
700 /*
701 * Close any file descriptors 0..2 that reference procfs,
702 * then make sure file descriptors 0..2 are in use.
703 *
704 * setugidsafety() may call closef() and then pfind()
705 * which may grab the process lock.
706 * fdcheckstd() may call falloc() which may block to
707 * allocate memory, so temporarily drop the process lock.
708 */
709 PROC_UNLOCK(p);
710 VOP_UNLOCK(imgp->vp, 0);
711 setugidsafety(td);
712 error = fdcheckstd(td);
713 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
714 if (error != 0)
715 goto done1;
716 PROC_LOCK(p);
717 /*
718 * Set the new credentials.
719 */
720 if (attr.va_mode & S_ISUID)
721 change_euid(newcred, euip);
722 if (attr.va_mode & S_ISGID)
723 change_egid(newcred, attr.va_gid);
724#ifdef MAC
725 if (will_transition) {
726 mac_vnode_execve_transition(oldcred, newcred, imgp->vp,
727 interpvplabel, imgp);
728 }
729#endif
730 /*
731 * Implement correct POSIX saved-id behavior.
732 *
733 * XXXMAC: Note that the current logic will save the
734 * uid and gid if a MAC domain transition occurs, even
735 * though maybe it shouldn't.
736 */
737 change_svuid(newcred, newcred->cr_uid);
738 change_svgid(newcred, newcred->cr_gid);
739 p->p_ucred = newcred;
740 newcred = NULL;
741 } else {
742 if (oldcred->cr_uid == oldcred->cr_ruid &&
743 oldcred->cr_gid == oldcred->cr_rgid)
744 p->p_flag &= ~P_SUGID;
745 /*
746 * Implement correct POSIX saved-id behavior.
747 *
748 * XXX: It's not clear that the existing behavior is
749 * POSIX-compliant. A number of sources indicate that the
750 * saved uid/gid should only be updated if the new ruid is
751 * not equal to the old ruid, or the new euid is not equal
752 * to the old euid and the new euid is not equal to the old
753 * ruid. The FreeBSD code always updates the saved uid/gid.
754 * Also, this code uses the new (replaced) euid and egid as
755 * the source, which may or may not be the right ones to use.
756 */
757 if (oldcred->cr_svuid != oldcred->cr_uid ||
758 oldcred->cr_svgid != oldcred->cr_gid) {
759 change_svuid(newcred, newcred->cr_uid);
760 change_svgid(newcred, newcred->cr_gid);
761 p->p_ucred = newcred;
762 newcred = NULL;
763 }
764 }
765
766 /*
767 * Store the vp for use in procfs. This vnode was referenced prior
768 * to locking the proc lock.
769 */
770 textvp = p->p_textvp;
771 p->p_textvp = binvp;
772
773#ifdef KDTRACE_HOOKS
774 /*
775 * Tell the DTrace fasttrap provider about the exec if it
776 * has declared an interest.
777 */
778 if (dtrace_fasttrap_exec)
779 dtrace_fasttrap_exec(p);
780#endif
781
782 /*
783 * Notify others that we exec'd, and clear the P_INEXEC flag
784 * as we're now a bona fide freshly-execed process.
785 */
786 KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
787 p->p_flag &= ~P_INEXEC;
788
789 /* clear "fork but no exec" flag, as we _are_ execing */
790 p->p_acflag &= ~AFORK;
791
792 /*
793 * Free any previous argument cache and replace it with
794 * the new argument cache, if any.
795 */
796 oldargs = p->p_args;
797 p->p_args = newargs;
798 newargs = NULL;
799
800#ifdef HWPMC_HOOKS
801 /*
802 * Check if system-wide sampling is in effect or if the
803 * current process is using PMCs. If so, do exec() time
804 * processing. This processing needs to happen AFTER the
805 * P_INEXEC flag is cleared.
806 *
807 * The proc lock needs to be released before taking the PMC
808 * SX.
809 */
810 if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
811 PROC_UNLOCK(p);
812 VOP_UNLOCK(imgp->vp, 0);
813 pe.pm_credentialschanged = credential_changing;
814 pe.pm_entryaddr = imgp->entry_addr;
815
816 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
817 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
818 } else
819 PROC_UNLOCK(p);
820#else /* !HWPMC_HOOKS */
821 PROC_UNLOCK(p);
822#endif
823
824 /* Set values passed into the program in registers. */
825 if (p->p_sysent->sv_setregs)
826 (*p->p_sysent->sv_setregs)(td, imgp,
827 (u_long)(uintptr_t)stack_base);
828 else
829 exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base);
830
831 vfs_mark_atime(imgp->vp, td->td_ucred);
832
833 SDT_PROBE(proc, kernel, , exec_success, args->fname, 0, 0, 0, 0);
834
835done1:
836 /*
837 * Free any resources malloc'd earlier that we didn't use.
838 */
839 uifree(euip);
840 if (newcred == NULL)
841 crfree(oldcred);
842 else
843 crfree(newcred);
844 VOP_UNLOCK(imgp->vp, 0);
845
846 /*
847 * Handle deferred decrement of ref counts.
848 */
849 if (textvp != NULL) {
850 int tvfslocked;
851
852 tvfslocked = VFS_LOCK_GIANT(textvp->v_mount);
853 vrele(textvp);
854 VFS_UNLOCK_GIANT(tvfslocked);
855 }
856 if (binvp && error != 0)
857 vrele(binvp);
858#ifdef KTRACE
859 if (tracevp != NULL) {
860 int tvfslocked;
861
862 tvfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
863 vrele(tracevp);
864 VFS_UNLOCK_GIANT(tvfslocked);
865 }
866 if (tracecred != NULL)
867 crfree(tracecred);
868#endif
869 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
870 pargs_drop(oldargs);
871 pargs_drop(newargs);
872 if (oldsigacts != NULL)
873 sigacts_free(oldsigacts);
874
875exec_fail_dealloc:
876
877 /*
878 * free various allocated resources
879 */
880 if (imgp->firstpage != NULL)
881 exec_unmap_first_page(imgp);
882
883 if (imgp->vp != NULL) {
884 if (args->fname)
885 NDFREE(&nd, NDF_ONLY_PNBUF);
886 if (imgp->opened)
887 VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
888 vput(imgp->vp);
889 }
890
891 if (imgp->object != NULL)
892 vm_object_deallocate(imgp->object);
893
894 free(imgp->freepath, M_TEMP);
895
896 if (error == 0) {
897 PROC_LOCK(p);
898 td->td_dbgflags |= TDB_EXEC;
899 PROC_UNLOCK(p);
900
901 /*
902 * Stop the process here if its stop event mask has
903 * the S_EXEC bit set.
904 */
905 STOPEVENT(p, S_EXEC, 0);
906 goto done2;
907 }
908
909exec_fail:
910 /* we're done here, clear P_INEXEC */
911 PROC_LOCK(p);
912 p->p_flag &= ~P_INEXEC;
913 PROC_UNLOCK(p);
914
915 SDT_PROBE(proc, kernel, , exec_failure, error, 0, 0, 0, 0);
916
917done2:
918#ifdef MAC
919 mac_execve_exit(imgp);
920 mac_execve_interpreter_exit(interpvplabel);
921#endif
922 VFS_UNLOCK_GIANT(vfslocked);
923 exec_free_args(args);
924
925 if (error && imgp->vmspace_destroyed) {
926 /* sorry, no process left to return to; exit gracefully */
927 exit1(td, W_EXITCODE(0, SIGABRT));
928 /* NOT REACHED */
929 }
930
931#ifdef KTRACE
932 if (error == 0)
933 ktrprocctor(p);
934#endif
935
936 return (error);
937}
938
939int
940exec_map_first_page(imgp)
941 struct image_params *imgp;
942{
943 int rv, i;
944 int initial_pagein;
945 vm_page_t ma[VM_INITIAL_PAGEIN];
946 vm_object_t object;
947
948 if (imgp->firstpage != NULL)
949 exec_unmap_first_page(imgp);
950
951 object = imgp->vp->v_object;
952 if (object == NULL)
953 return (EACCES);
954 VM_OBJECT_LOCK(object);
955#if VM_NRESERVLEVEL > 0
956 if ((object->flags & OBJ_COLORED) == 0) {
957 object->flags |= OBJ_COLORED;
958 object->pg_color = 0;
959 }
960#endif
961 ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
962 if (ma[0]->valid != VM_PAGE_BITS_ALL) {
963 initial_pagein = VM_INITIAL_PAGEIN;
964 if (initial_pagein > object->size)
965 initial_pagein = object->size;
966 for (i = 1; i < initial_pagein; i++) {
967 if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
968 if (ma[i]->valid)
969 break;
970 if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy)
971 break;
972 vm_page_busy(ma[i]);
973 } else {
974 ma[i] = vm_page_alloc(object, i,
975 VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
976 if (ma[i] == NULL)
977 break;
978 }
979 }
980 initial_pagein = i;
981 rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
982 ma[0] = vm_page_lookup(object, 0);
983 if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
984 if (ma[0] != NULL) {
985 vm_page_lock(ma[0]);
986 vm_page_free(ma[0]);
987 vm_page_unlock(ma[0]);
988 }
989 VM_OBJECT_UNLOCK(object);
990 return (EIO);
991 }
992 }
993 vm_page_lock(ma[0]);
994 vm_page_hold(ma[0]);
995 vm_page_unlock(ma[0]);
996 vm_page_wakeup(ma[0]);
997 VM_OBJECT_UNLOCK(object);
998
999 imgp->firstpage = sf_buf_alloc(ma[0], 0);
1000 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
1001
1002 return (0);
1003}
1004
1005void
1006exec_unmap_first_page(imgp)
1007 struct image_params *imgp;
1008{
1009 vm_page_t m;
1010
1011 if (imgp->firstpage != NULL) {
1012 m = sf_buf_page(imgp->firstpage);
1013 sf_buf_free(imgp->firstpage);
1014 imgp->firstpage = NULL;
1015 vm_page_lock(m);
1016 vm_page_unhold(m);
1017 vm_page_unlock(m);
1018 }
1019}
1020
1021/*
1022 * Destroy old address space, and allocate a new stack
1023 * The new stack is only SGROWSIZ large because it is grown
1024 * automatically in trap.c.
1025 */
1026int
1027exec_new_vmspace(imgp, sv)
1028 struct image_params *imgp;
1029 struct sysentvec *sv;
1030{
1031 int error;
1032 struct proc *p = imgp->proc;
1033 struct vmspace *vmspace = p->p_vmspace;
1034 vm_object_t obj;
1035 vm_offset_t sv_minuser, stack_addr;
1036 vm_map_t map;
1037 u_long ssiz;
1038
1039 imgp->vmspace_destroyed = 1;
1040 imgp->sysent = sv;
1041
1042 /* May be called with Giant held */
1043 EVENTHANDLER_INVOKE(process_exec, p, imgp);
1044
1045 /*
1046 * Blow away entire process VM, if address space not shared,
1047 * otherwise, create a new VM space so that other threads are
1048 * not disrupted
1049 */
1050 map = &vmspace->vm_map;
1051 if (map_at_zero)
1052 sv_minuser = sv->sv_minuser;
1053 else
1054 sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
1055 if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
1056 vm_map_max(map) == sv->sv_maxuser) {
1057 shmexit(vmspace);
1058 pmap_remove_pages(vmspace_pmap(vmspace));
1059 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
1060 } else {
1061 error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
1062 if (error)
1063 return (error);
1064 vmspace = p->p_vmspace;
1065 map = &vmspace->vm_map;
1066 }
1067
1068 /* Map a shared page */
1069 obj = sv->sv_shared_page_obj;
1070 if (obj != NULL) {
1071 vm_object_reference(obj);
1072 error = vm_map_fixed(map, obj, 0,
1073 sv->sv_shared_page_base, sv->sv_shared_page_len,
1074 VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_ALL,
1075 MAP_COPY_ON_WRITE | MAP_ACC_NO_CHARGE);
1076 if (error) {
1077 vm_object_deallocate(obj);
1078 return (error);
1079 }
1080 }
1081
1082 /* Allocate a new stack */
1083 if (sv->sv_maxssiz != NULL)
1084 ssiz = *sv->sv_maxssiz;
1085 else
1086 ssiz = maxssiz;
1087 stack_addr = sv->sv_usrstack - ssiz;
1088 error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
1089 obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
1090 sv->sv_stackprot,
1091 VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
1092 if (error)
1093 return (error);
1094
1095#ifdef __ia64__
1096 /* Allocate a new register stack */
1097 stack_addr = IA64_BACKINGSTORE;
1098 error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
1099 sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
1100 if (error)
1101 return (error);
1102#endif
1103
1104 /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
1105 * VM_STACK case, but they are still used to monitor the size of the
1106 * process stack so we can check the stack rlimit.
1107 */
1108 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
1109 vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - ssiz;
1110
1111 return (0);
1112}
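/*
 * For clarity: after the calls above the fresh address space contains,
 * in addition to whatever the image activator maps later,
 *
 *   - the per-ABI shared page, if the sysentvec has one: sv_shared_page_len
 *     bytes of sv_shared_page_obj mapped copy-on-write at
 *     sv_shared_page_base with VM_PROT_READ | VM_PROT_EXECUTE;
 *   - the main stack: ssiz bytes reserved below sv_usrstack, growing down,
 *     protected by imgp->stack_prot (if set and a shared page exists) or
 *     sv_stackprot;
 *   - on ia64 only, a register backing-store stack at IA64_BACKINGSTORE,
 *     growing up;
 *
 * with vm_ssize and vm_maxsaddr tracking the stack only for rlimit checks.
 */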
1113
1114/*
1115 * Copy out argument and environment strings from the old process address
1116 * space into the temporary string buffer.
1117 */
1118int
1119exec_copyin_args(struct image_args *args, char *fname,
1120 enum uio_seg segflg, char **argv, char **envv)
1121{
1122 char *argp, *envp;
1123 int error;
1124 size_t length;
1125
1126 bzero(args, sizeof(*args));
1127 if (argv == NULL)
1128 return (EFAULT);
1129
1130 /*
1131 * Allocate demand-paged memory for the file name, argument, and
1132 * environment strings.
1133 */
1134 error = exec_alloc_args(args);
1135 if (error != 0)
1136 return (error);
1137
1138 /*
1139 * Copy the file name.
1140 */
1141 if (fname != NULL) {
1142 args->fname = args->buf;
1143 error = (segflg == UIO_SYSSPACE) ?
1144 copystr(fname, args->fname, PATH_MAX, &length) :
1145 copyinstr(fname, args->fname, PATH_MAX, &length);
1146 if (error != 0)
1147 goto err_exit;
1148 } else
1149 length = 0;
1150
1151 args->begin_argv = args->buf + length;
1152 args->endp = args->begin_argv;
1153 args->stringspace = ARG_MAX;
1154
1155 /*
1156 * extract arguments first
1157 */
1158 while ((argp = (caddr_t) (intptr_t) fuword(argv++))) {
1159 if (argp == (caddr_t) -1) {
1160 error = EFAULT;
1161 goto err_exit;
1162 }
1163 if ((error = copyinstr(argp, args->endp,
1164 args->stringspace, &length))) {
1165 if (error == ENAMETOOLONG)
1166 error = E2BIG;
1167 goto err_exit;
1168 }
1169 args->stringspace -= length;
1170 args->endp += length;
1171 args->argc++;
1172 }
1173
1174 args->begin_envv = args->endp;
1175
1176 /*
1177 * extract environment strings
1178 */
1179 if (envv) {
1180 while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
1181 if (envp == (caddr_t)-1) {
1182 error = EFAULT;
1183 goto err_exit;
1184 }
1185 if ((error = copyinstr(envp, args->endp,
1186 args->stringspace, &length))) {
1187 if (error == ENAMETOOLONG)
1188 error = E2BIG;
1189 goto err_exit;
1190 }
1191 args->stringspace -= length;
1192 args->endp += length;
1193 args->envc++;
1194 }
1195 }
1196
1197 return (0);
1198
1199err_exit:
1200 exec_free_args(args);
1201 return (error);
1202}
1203
1204/*
1205 * Allocate temporary demand-paged, zero-filled memory for the file name,
1206 * argument, and environment strings. Returns zero if the allocation succeeds
1207 * and ENOMEM otherwise.
1208 */
1209int
1210exec_alloc_args(struct image_args *args)
1211{
1212
1213 args->buf = (char *)kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
1214 return (args->buf != NULL ? 0 : ENOMEM);
1215}
1216
1217void
1218exec_free_args(struct image_args *args)
1219{
1220
1221 if (args->buf != NULL) {
1222 kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
1223 PATH_MAX + ARG_MAX);
1224 args->buf = NULL;
1225 }
1226 if (args->fname_buf != NULL) {
1227 free(args->fname_buf, M_TEMP);
1228 args->fname_buf = NULL;
1229 }
1230}
1231
1232/*
1233 * Copy strings out to the new process address space, constructing new arg
1234 * and env vector tables. Return a pointer to the base so that it can be used
1235 * as the initial stack pointer.
1236 */
1237register_t *
1238exec_copyout_strings(imgp)
1239 struct image_params *imgp;
1240{
1241 int argc, envc;
1242 char **vectp;
1243 char *stringp, *destp;
1244 register_t *stack_base;
1245 struct ps_strings *arginfo;
1246 struct proc *p;
1247 size_t execpath_len;
1248 int szsigcode, szps;
1249 char canary[sizeof(long) * 8];
1250
1251 szps = sizeof(pagesizes[0]) * MAXPAGESIZES;
1252 /*
1253 * Calculate string base and vector table pointers.
1254 * Also deal with signal trampoline code for this exec type.
1255 */
1256 if (imgp->execpath != NULL && imgp->auxargs != NULL)
1257 execpath_len = strlen(imgp->execpath) + 1;
1258 else
1259 execpath_len = 0;
1260 p = imgp->proc;
1261 szsigcode = 0;
1262 arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
1263 if (p->p_sysent->sv_sigcode_base == 0) {
1264 if (p->p_sysent->sv_szsigcode != NULL)
1265 szsigcode = *(p->p_sysent->sv_szsigcode);
1266 }
1267 destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
1268 roundup(execpath_len, sizeof(char *)) -
1269 roundup(sizeof(canary), sizeof(char *)) -
1270 roundup(szps, sizeof(char *)) -
1271 roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *));
1272
1273 /*
1274 * install sigcode
1275 */
1276 if (szsigcode != 0)
1277 copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
1278 szsigcode), szsigcode);
1279
1280 /*
1281 * Copy the image path for the rtld.
1282 */
1283 if (execpath_len != 0) {
1284 imgp->execpathp = (uintptr_t)arginfo - szsigcode - execpath_len;
1285 copyout(imgp->execpath, (void *)imgp->execpathp,
1286 execpath_len);
1287 }
1288
1289 /*
1290 * Prepare the canary for SSP.
1291 */
1292 arc4rand(canary, sizeof(canary), 0);
1293 imgp->canary = (uintptr_t)arginfo - szsigcode - execpath_len -
1294 sizeof(canary);
1295 copyout(canary, (void *)imgp->canary, sizeof(canary));
1296 imgp->canarylen = sizeof(canary);
1297
1298 /*
1299 * Prepare the pagesizes array.
1300 */
1301 imgp->pagesizes = (uintptr_t)arginfo - szsigcode - execpath_len -
1302 roundup(sizeof(canary), sizeof(char *)) - szps;
1303 copyout(pagesizes, (void *)imgp->pagesizes, szps);
1304 imgp->pagesizeslen = szps;
1305
1306 /*
1307 * If we have a valid auxargs ptr, prepare some room
1308 * on the stack.
1309 */
1310 if (imgp->auxargs) {
1311 /*
1312 * 'AT_COUNT*2' is the size for the ELF Auxargs data. This is
1313 * kept for backward compatibility.
1314 */
1315 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
1316 (AT_COUNT * 2);
1317 /*
1318 * The '+ 2' is for the null pointers at the end of each of
1319 * the arg and env vector sets, and imgp->auxarg_size is room
1320 * for the arguments of the runtime loader.
1321 */
1322 vectp = (char **)(destp - (imgp->args->argc +
1323 imgp->args->envc + 2 + imgp->auxarg_size)
1324 * sizeof(char *));
1325 } else {
1326 /*
1327 * The '+ 2' is for the null pointers at the end of each of
1328 * the arg and env vector sets
1329 */
1330 vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) *
1331 sizeof(char *));
1332 }
1333
1334 /*
1335 * vectp also becomes our initial stack base
1336 */
1337 stack_base = (register_t *)vectp;
1338
1339 stringp = imgp->args->begin_argv;
1340 argc = imgp->args->argc;
1341 envc = imgp->args->envc;
1342
1343 /*
1344 * Copy out strings - arguments and environment.
1345 */
1346 copyout(stringp, destp, ARG_MAX - imgp->args->stringspace);
1347
1348 /*
1349 * Fill in "ps_strings" struct for ps, w, etc.
1350 */
1351 suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
1352 suword32(&arginfo->ps_nargvstr, argc);
1353
1354 /*
1355 * Fill in argument portion of vector table.
1356 */
1357 for (; argc > 0; --argc) {
1358 suword(vectp++, (long)(intptr_t)destp);
1359 while (*stringp++ != 0)
1360 destp++;
1361 destp++;
1362 }
1363
1364 /* a null vector table pointer separates the argp's from the envp's */
1365 suword(vectp++, 0);
1366
1367 suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
1368 suword32(&arginfo->ps_nenvstr, envc);
1369
1370 /*
1371 * Fill in environment portion of vector table.
1372 */
1373 for (; envc > 0; --envc) {
1374 suword(vectp++, (long)(intptr_t)destp);
1375 while (*stringp++ != 0)
1376 destp++;
1377 destp++;
1378 }
1379
1380 /* end of vector table is a null pointer */
1381 suword(vectp, 0);
1382
1383 return (stack_base);
1384}
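/*
 * For clarity, the user area filled in above ends up laid out roughly as
 * follows, from the ps_strings structure downward (each block rounded to
 * sizeof(char *); the SPARE_USRSPACE slack is omitted):
 *
 *	arginfo (sv_psstrings):	struct ps_strings, filled via suword()
 *	sigcode:		signal trampoline, only when
 *				sv_sigcode_base == 0
 *	execpath:		full image path for rtld (imgp->execpathp)
 *	canary:			SSP canary bytes (imgp->canary)
 *	pagesizes:		pagesizes[] snapshot (imgp->pagesizes)
 *	destp:			argument and environment strings
 *	vectp:			argv[0..argc-1], NULL, envp[0..envc-1], NULL,
 *				plus room for the ELF auxargs when
 *				imgp->auxargs is set
 *	stack_base:		== vectp, returned as the initial stack
 *				pointer for the new image
 */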
1385
1386/*
1387 * Check permissions of file to execute.
1388 * Called with imgp->vp locked.
1389 * Return 0 for success or error code on failure.
1390 */
1391int
1392exec_check_permissions(imgp)
1393 struct image_params *imgp;
1394{
1395 struct vnode *vp = imgp->vp;
1396 struct vattr *attr = imgp->attr;
1397 struct thread *td;
1398 int error;
1399
1400 td = curthread;
1401
1402 /* Get file attributes */
1403 error = VOP_GETATTR(vp, attr, td->td_ucred);
1404 if (error)
1405 return (error);
1406
1407#ifdef MAC
1408 error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
1409 if (error)
1410 return (error);
1411#endif
1412
1413 /*
1414 * 1) Check if file execution is disabled for the filesystem that
1415 * this file resides on.
1416 * 2) Ensure that at least one execute bit is on. Otherwise, a
1417 * privileged user will always succeed, and we don't want this
1418 * to happen unless the file really is executable.
1419 * 3) Ensure that the file is a regular file.
1420 */
1421 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1422 (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
1423 (attr->va_type != VREG))
1424 return (EACCES);
1425
1426 /*
1427 * Zero length files can't be exec'd
1428 */
1429 if (attr->va_size == 0)
1430 return (ENOEXEC);
1431
1432 /*
1433 * Check for execute permission to file based on current credentials.
1434 */
1435 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1436 if (error)
1437 return (error);
1438
1439 /*
1440 * Check number of open-for-writes on the file and deny execution
1441 * if there are any.
1442 */
1443 if (vp->v_writecount)
1444 return (ETXTBSY);
1445
1446 /*
1447 * Call filesystem specific open routine (which does nothing in the
1448 * general case).
1449 */
1450 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
1451 if (error == 0)
1452 imgp->opened = 1;
1453 return (error);
1454}
1455
1456/*
1457 * Exec handler registration
1458 */
1459int
1460exec_register(execsw_arg)
1461 const struct execsw *execsw_arg;
1462{
1463 const struct execsw **es, **xs, **newexecsw;
1464 int count = 2; /* New slot and trailing NULL */
1465
1466 if (execsw)
1467 for (es = execsw; *es; es++)
1468 count++;
1469 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1470 if (newexecsw == NULL)
1471 return (ENOMEM);
1472 xs = newexecsw;
1473 if (execsw)
1474 for (es = execsw; *es; es++)
1475 *xs++ = *es;
1476 *xs++ = execsw_arg;
1477 *xs = NULL;
1478 if (execsw)
1479 free(execsw, M_TEMP);
1480 execsw = newexecsw;
1481 return (0);
1482}
1483
1484int
1485exec_unregister(execsw_arg)
1486 const struct execsw *execsw_arg;
1487{
1488 const struct execsw **es, **xs, **newexecsw;
1489 int count = 1;
1490
1491 if (execsw == NULL)
1492 panic("unregister with no handlers left?\n");
1493
1494 for (es = execsw; *es; es++) {
1495 if (*es == execsw_arg)
1496 break;
1497 }
1498 if (*es == NULL)
1499 return (ENOENT);
1500 for (es = execsw; *es; es++)
1501 if (*es != execsw_arg)
1502 count++;
1503 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1504 if (newexecsw == NULL)
1505 return (ENOMEM);
1506 xs = newexecsw;
1507 for (es = execsw; *es; es++)
1508 if (*es != execsw_arg)
1509 *xs++ = *es;
1510 *xs = NULL;
1511 if (execsw)
1512 free(execsw, M_TEMP);
1513 execsw = newexecsw;
1514 return (0);
1515}
1516
1517static struct sx shared_page_alloc_sx;
1518static vm_object_t shared_page_obj;
1519static int shared_page_free;
1520char *shared_page_mapping;
1521
1522void
1523shared_page_write(int base, int size, const void *data)
1524{

--- 181 unchanged lines hidden ---
Added (237477): the new kern_sharedpage.c opens with these same shared-page
declarations and the shared_page_write() definition at its lines 52-59.

--- 181 unchanged lines hidden ---
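/*
 * For illustration: shared_page_write() copies bytes into the kernel
 * mapping of the shared page, which exec_new_vmspace() maps read/execute
 * into every process.  A hypothetical caller that owns "base" bytes into
 * the page could publish a small structure to all processes like this;
 * the offset, structure, and values are made up for the sketch.
 */
struct example_note {
	uint32_t	gen;
	uint32_t	value;
};

static void
example_publish(int base, uint32_t value)
{
	struct example_note note;

	note.gen = 1;
	note.value = value;
	shared_page_write(base, sizeof(note), &note);
}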