/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

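/*
 * When set (via the vm.old_mlock sysctl or as a boot-time tunable),
 * mlockall(2) requests are not checked against RLIMIT_MEMLOCK,
 * restoring the historic behavior.
 */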
int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");

#ifdef MAP_32BIT
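/* The highest address to which a MAP_32BIT mapping may extend: 2 GB. */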
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

int
ogetpagesize(struct thread *td, struct getpagesize_args *uap)
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
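/*
 * For illustration, a minimal userland sketch of the non-aligned case
 * described above ("datafile" is a hypothetical name; assumes a 4 KB
 * PAGE_SIZE):
 *
 *	int fd = open("datafile", O_RDONLY);
 *	// 0x1234 is not page aligned: the kernel maps from
 *	// trunc_page(0x1234) == 0x1000 and returns base + 0x234,
 *	// so *p addresses the byte at file offset 0x1234.
 *	char *p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 */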
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
sys_mmap(struct thread *td, struct mmap_args *uap)
{
	struct file *fp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t cap_maxprot;
	int align, error, flags, prot;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	cap_rights_t rights;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	AUDIT_ARG_FD(uap->fd);

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * An anonymous mapping shall specify -1 as the file descriptor
	 * and zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mappings, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((uap->len == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (uap->fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
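	/*
	 * For example, with 4 KB pages, pos = 0x12345 and len = 100 yield
	 * pageoff = 0x345, pos = 0x12000, and size = round_page(100 + 0x345)
	 * = 0x1000.
	 */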

	/* Ensure alignment is at least a page and fits in a pointer. */
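	/*
	 * The log2 of the requested alignment is carried in the high bits of
	 * flags; e.g. MAP_ALIGNED(16) requests 64 KB alignment.  On a 64-bit
	 * machine with 4 KB pages, the check below accepts values 12..63.
	 */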
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (size == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    VM_PROT_ALL, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping file, get fp for validation and don't let the
		 * descriptor disappear on us if we block. Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, uap->fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}

		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{
	struct mmap_args oargs;

	oargs.addr = uap->addr;
	oargs.len = uap->len;
	oargs.prot = uap->prot;
	oargs.flags = uap->flags;
	oargs.fd = uap->fd;
	oargs.pos = uap->pos;
	return (sys_mmap(td, &oargs));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	struct mmap_args nargs;
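	/* The old 4.3BSD prot encoding: X is 0x1, W is 0x2, R is 0x4. */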
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    nargs.prot != 0)
		nargs.prot |= PROT_EXEC;
#endif
#endif
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (sys_mmap(td, &nargs));
}
#endif				/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
sys_msync(struct thread *td, struct msync_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
#endif
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	/*
	 * Inform hwpmc if the address range being unmapped contains
	 * an executable region.
	 */
	pkm.pm_address = (uintptr_t) NULL;
	if (vm_map_lookup_entry(map, addr, &entry)) {
		for (;
		     entry != &map->header && entry->start < addr + size;
		     entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
				entry->end, VM_PROT_EXECUTE) == TRUE) {
				pkm.pm_address = (uintptr_t) addr;
				pkm.pm_size = (size_t) size;
				break;
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	/* downgrade the lock to prevent a LOR with the pmc-sx lock */
	vm_map_lock_downgrade(map);
	if (pkm.pm_address != (uintptr_t) NULL)
		PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
	vm_map_unlock_read(map);
#else
	vm_map_unlock(map);
#endif
	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
int
sys_madvise(struct thread *td, struct madvise_args *uap)
{
	vm_offset_t start, end;
	vm_map_t map;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
int
sys_mincore(struct thread *td, struct mincore_args *uap)
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (current->next == &map->header ||
		     current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
			current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYWLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_WLOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m == NULL &&
					    vm_page_is_cached(object, pindex))
						mincoreinfo = MINCORE_INCORE;
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->aflags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->aflags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (vm_mlock(td->td_proc, td->td_ucred, uap->addr, uap->len));
}

int
vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
	if (error)
		return (error);
	addr = (vm_offset_t)addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int	how;
};
#endif

/*
 * MPSAFE
 */
int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		PROC_LOCK(td->td_proc);
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		PROC_UNLOCK(td->td_proc);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
#ifdef RACCT
	vm_map_t map;
#endif
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		map = &td->td_proc->p_vmspace->vm_map;
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
	struct vattr va;
	vm_object_t obj;
	vm_offset_t foff;
	struct ucred *cred;
	int error, flags, locktype;

	cred = td->td_ucred;
	if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED))
		locktype = LK_EXCLUSIVE;
	else
		locktype = LK_SHARED;
	if ((error = vget(vp, locktype, td)) != 0)
		return (error);
	AUDIT_ARG_VNODE1(vp);
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->type == OBJT_VNODE && obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			/*
			 * Bypass filesystems obey the mpsafety of the
			 * underlying fs.  Tmpfs never bypasses.
			 */
			error = vget(vp, locktype, td);
			if (error != 0)
				return (error);
		}
		if (locktype == LK_EXCLUSIVE) {
			*writecounted = TRUE;
			vnode_pager_update_writecount(obj, 0, objsize);
		}
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	/* This relies on VM_PROT_* matching PROT_*. */
	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & VM_PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	if (obj->type == OBJT_VNODE) {
		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
		    cred);
		if (obj == NULL) {
			error = ENOMEM;
			goto done;
		}
	} else {
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		VM_OBJECT_WLOCK(obj);
		vm_object_reference_locked(obj);
#if VM_NRESERVLEVEL > 0
		vm_object_color(obj, 0);
#endif
		VM_OBJECT_WUNLOCK(obj);
	}
	*objp = obj;
	*flagsp = flags;

	vfs_mark_atime(vp, cred);

done:
	if (error != 0 && *writecounted) {
		*writecounted = FALSE;
		vnode_pager_update_writecount(obj, objsize, 0);
	}
	vput(vp);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	int error, flags;

	flags = *flagsp;

	if (dsw->d_flags & D_MMAP_ANON) {
		*objp = NULL;
		*foff = 0;
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
	if (error != 0)
		return (error);
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, sys5 shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
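/*
 * A minimal call sketch (hypothetical names; the caller is assumed to
 * already hold a reference on "foocdev" and lets the kernel choose the
 * address):
 *
 *	vm_offset_t addr = 0;
 *	error = vm_mmap(&vms->vm_map, &addr, PAGE_SIZE, VM_PROT_READ,
 *	    VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, foocdev, 0);
 */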
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	objtype_t handle_type, void *handle,
	vm_ooffset_t foff)
{
	vm_object_t object;
	struct thread *td = curthread;
	int error;
	boolean_t writecounted;

	if (size == 0)
		return (EINVAL);

	size = round_page(size);
	object = NULL;
	writecounted = FALSE;

	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE: {
		struct cdevsw *dsw;
		struct cdev *cdev;
		int ref;

		cdev = handle;
		dsw = dev_refthread(cdev, &ref);
		if (dsw == NULL)
			return (ENXIO);
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
		    dsw, &foff, &object);
		dev_relthread(cdev, ref);
		break;
	}
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object, &writecounted);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0 && object != NULL) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vnode_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
	return (error);
}

/*
 * Internal version of mmap that maps a specific VM object into an
 * arbitrary map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and
 * vn_mmap.
 */
int
vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
    boolean_t writecounted, struct thread *td)
{
	boolean_t fitit;
	int docow, error, findspace, rv;

	if (map == &td->td_proc->p_vmspace->vm_map) {
		PROC_LOCK(td->td_proc);
		if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) + size >
			    lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (ENOMEM);
			}
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + size);
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (error);
			}
		}
		PROC_UNLOCK(td->td_proc);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The mmap() system call already enforces this by subtracting
	 * the page offset from the file offset, but checking here
	 * catches errors in device drivers (e.g. d_single_mmap()
	 * callbacks) and other internal mapping requests (such as in
	 * exec).
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}

	if (flags & MAP_ANON) {
		if (object != NULL || foff != 0)
			return (EINVAL);
		docow = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
	/* Shared memory is also shared with children. */
	if (flags & MAP_SHARED)
		docow |= MAP_INHERIT_SHARE;
	if (writecounted)
		docow |= MAP_VN_WRITECOUNT;
	if (flags & MAP_STACK) {
		if (object != NULL)
			return (EINVAL);
		docow |= MAP_STACK_GROWS_DOWN;
	}
	if ((flags & MAP_EXCL) != 0)
		docow |= MAP_CHECK_EXCL;

	if (fitit) {
		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
			findspace = VMFS_SUPER_SPACE;
		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
			findspace = VMFS_ALIGNED_SPACE(flags >>
			    MAP_ALIGNMENT_SHIFT);
		else
			findspace = VMFS_OPTIMAL_SPACE;
		rv = vm_map_find(map, object, foff, addr, size,
#ifdef MAP_32BIT
		    flags & MAP_32BIT ? MAP_32BIT_MAX_ADDR :
#endif
		    0, findspace, prot, maxprot, docow);
	} else {
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);
	}

	if (rv == KERN_SUCCESS) {
		/*
		 * If the process has requested that all future mappings
		 * be wired, then heed this.
		 */
		if (map->flags & MAP_WIREFUTURE) {
			vm_map_wire(map, *addr, *addr + size,
			    VM_MAP_WIRE_USER | ((flags & MAP_STACK) ?
			    VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES));
		}
	}
	return (vm_mmap_to_errno(rv));
}

/*
 * Translate a Mach VM return code to zero on success or the appropriate errno
 * on failure.
 */
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}