vm_mmap.c (9456 bytes) → vm_mmap.c (9507 bytes)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.

--- 24 unchanged lines hidden ---

 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 * @(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.24 1995/05/30 08:16:09 rgrimes Exp $
+ * $Id: vm_mmap.c,v 1.25 1995/07/09 06:58:01 davidg Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>

--- 7 unchanged lines hidden ---

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_prot.h>

-#ifdef DEBUG
-int mmapdebug;
-
-#define	MDB_FOLLOW	0x01
-#define	MDB_SYNC	0x02
-#define	MDB_MAPIT	0x04
-#endif
-
void pmap_object_init_pt();

struct sbrk_args {
	int incr;
};

/* ARGSUSED */
int

--- 63 unchanged lines hidden ---

	vm_offset_t addr;
	vm_size_t size;
	vm_prot_t prot, maxprot;
	caddr_t handle;
	int flags, error;

	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
-		    p->p_pid, uap->addr, uap->len, prot,
-		    flags, uap->fd, (vm_offset_t) uap->pos);
-#endif
	/*
	 * Address (if FIXED) must be page aligned. Size is implicitly rounded
	 * to a page boundary.
	 */
	addr = (vm_offset_t) uap->addr;
	if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) ||
	    (ssize_t) uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);
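The check above encodes three user-visible rules that hold in both revisions: a MAP_FIXED address must be page aligned, a negative length is rejected, and MAP_ANON mappings must be passed fd == -1. A minimal userland sketch of the same contract (illustrative only, not part of this file; getpagesize() and the flag names are the standard BSD API):

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pg = getpagesize();
	void *p;

	/* fd must be -1 for MAP_ANON, or the kernel returns EINVAL. */
	p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	/* len is rounded up to a page boundary inside the kernel, so a
	 * 1-byte request would still consume a whole page of VA. */
	return (munmap(p, pg));
}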

--- 147 unchanged lines hidden ---

	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	int flags;
	vm_map_t map;
	int rv;

-#ifdef DEBUG
-	if (mmapdebug & (MDB_FOLLOW | MDB_SYNC))
-		printf("msync(%d): addr %x len %x\n",
-		    p->p_pid, uap->addr, uap->len);
-#endif
-
	map = &p->p_vmspace->vm_map;
	addr = (vm_offset_t) uap->addr;
	size = (vm_size_t) uap->len;
	flags = uap->flags;

	if (((int) addr & PAGE_MASK) || addr + size < addr ||
	    (flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);
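Note the flag rule enforced here: MS_ASYNC and MS_INVALIDATE are mutually exclusive, and the address must be page aligned with no wrap in addr + size. A small usage sketch (illustrative only, not from this file):

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

/*
 * Flush a mapped region to backing store. Combining MS_ASYNC with
 * MS_INVALIDATE trips the EINVAL check above, so pick one or neither.
 */
int
flush_region(void *addr, size_t len)
{
	if (msync(addr, len, MS_ASYNC | MS_INVALIDATE) == 0)
		return (0);
	if (errno == EINVAL)
		fprintf(stderr, "MS_ASYNC|MS_INVALIDATE rejected\n");
	/* Synchronous flush, then drop the cached pages. */
	return (msync(addr, len, MS_INVALIDATE));
}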

--- 12 unchanged lines hidden ---

		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
		addr = entry->start;
		size = entry->end - entry->start;
	}

-#ifdef DEBUG
-	if (mmapdebug & MDB_SYNC)
-		printf("msync: cleaning/flushing address range [%x-%x)\n",
-		    addr, addr + size);
-#endif
-
	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

	switch (rv) {
	case KERN_SUCCESS:

--- 18 unchanged lines hidden ---

	register struct proc *p;
	register struct munmap_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	vm_map_t map;

-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("munmap(%d): addr %x len %x\n",
-		    p->p_pid, uap->addr, uap->len);
-#endif
-
	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) round_page(uap->len);
	if (size == 0)
		return (0);
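round_page() rounds the length up to the next page boundary, so a partial-page request releases the whole page, and a zero length succeeds without touching the map. The arithmetic, modeled on the usual mask-based definition with a hypothetical 4 KB page (the kernel's PAGE_SIZE and PAGE_MASK come from machine headers):

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* hypothetical page size */
#define PAGE_MASK	(PAGE_SIZE - 1)
#define round_page(x)	(((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	printf("round_page(1)    = %lu\n", round_page(1));	/* 4096 */
	printf("round_page(4096) = %lu\n", round_page(4096));	/* 4096 */
	printf("round_page(4097) = %lu\n", round_page(4097));	/* 8192 */
	return (0);
}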
	/*
	 * Check for illegal addresses. Watch out for address wrap... Note

--- 18 unchanged lines hidden ---

	return (0);
}

void
munmapfd(p, fd)
	struct proc *p;
	int fd;
{
-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("munmapfd(%d): fd %d\n", p->p_pid, fd);
-#endif
-
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t addr;

--- 5 unchanged lines hidden ---

	struct proc *p;
	struct mprotect_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	register vm_prot_t prot;

-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("mprotect(%d): addr %x len %x prot %d\n",
-		    p->p_pid, uap->addr, uap->len, uap->prot);
-#endif
-
	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->len < 0)
		return (EINVAL);
	size = (vm_size_t) uap->len;
	prot = uap->prot & VM_PROT_ALL;

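As in munmap, the address must sit on a page boundary, and the requested protection is masked down to VM_PROT_ALL before vm_map_protect() applies it. A userland sketch of the effect (illustrative only):

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pg = getpagesize();
	char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;			/* writable for now */
	/* p is page aligned, so this passes the PAGE_MASK check. */
	if (mprotect(p, pg, PROT_READ) == -1)
		perror("mprotect");
	/* A store through p would now deliver a protection fault. */
	return (munmap(p, pg));
}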
	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    FALSE)) {

--- 50 unchanged lines hidden ---

	struct proc *p;
	struct mlock_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	int error;

-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("mlock(%d): addr %x len %x\n",
-		    p->p_pid, uap->addr, uap->len);
-#endif
	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
		return (EINVAL);
	size = round_page((vm_size_t) uap->len);
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >

--- 18 unchanged lines hidden ---

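In the hunk above, atop() converts a byte count to a page count and ptoa() does the reverse, so the first test caps the system-wide wired-page total at vm_page_max_wired, while the pmap_wired_count test (apparently continued in the hidden lines) bounds this process's wired footprint. The page arithmetic, with hypothetical numbers:

#include <stdio.h>

#define PAGE_SHIFT	12			/* hypothetical 4 KB pages */
#define atop(x)		((unsigned long)(x) >> PAGE_SHIFT)
#define ptoa(x)		((unsigned long)(x) << PAGE_SHIFT)

int
main(void)
{
	unsigned long size = 8 * 4096;		/* mlock request: 8 pages */
	unsigned long v_wire_count = 1000;	/* pages wired system-wide */
	unsigned long vm_page_max_wired = 1005;	/* global cap */

	/* Mirrors: atop(size) + cnt.v_wire_count > vm_page_max_wired */
	if (atop(size) + v_wire_count > vm_page_max_wired)
		printf("EAGAIN: %lu pages would exceed the cap\n", atop(size));
	printf("ptoa(1) = %lu bytes\n", ptoa(1));
	return (0);
}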
	struct proc *p;
	struct munlock_args *uap;
	int *retval;
{
	vm_offset_t addr;
	vm_size_t size;
	int error;

-#ifdef DEBUG
-	if (mmapdebug & MDB_FOLLOW)
-		printf("munlock(%d): addr %x len %x\n",
-		    p->p_pid, uap->addr, uap->len);
-#endif
	addr = (vm_offset_t) uap->addr;
	if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
		return (EINVAL);
#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

--- 13 unchanged lines hidden ---

	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	vm_prot_t prot, maxprot;
	register int flags;
	caddr_t handle;	/* XXX should be vp */
	vm_offset_t foff;
{
-	register vm_pager_t pager;
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
-	int type;
+	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_size_t objsize;
	struct proc *p = curproc;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

--- 15 unchanged lines hidden ---

	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
-	 * Lookup/allocate pager.  All except an unnamed anonymous lookup gain
-	 * a reference to ensure continued existance of the object. (XXX the
-	 * exception is to appease the pageout daemon)
+	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
-		type = PG_DFLT;
+		type = OBJT_SWAP;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		if (vp->v_type == VCHR) {
-			type = PG_DEVICE;
+			type = OBJT_DEVICE;
			handle = (caddr_t) vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
			if (error)
				return (error);
			objsize = vat.va_size;
-			type = PG_VNODE;
+			type = OBJT_VNODE;
		}
	}
-	pager = vm_pager_allocate(type, handle, objsize, prot, foff);
-	if (pager == NULL)
-		return (type == PG_DEVICE ? EINVAL : ENOMEM);
-	/*
-	 * Guarantee that the pager has an object.
-	 */
-	object = vm_object_lookup(pager);
-	if (object == NULL) {
-		if (handle != NULL)
-			panic("vm_mmap: pager didn't allocate an object (and should have)");
-		/*
-		 * Should only happen for unnamed anonymous regions.
-		 */
-		object = vm_object_allocate(size);
-		object->pager = pager;
-	} else {
-		/*
-		 * Lose vm_object_lookup() reference.  Retain reference
-		 * gained by vm_pager_allocate().
-		 */
-		vm_object_deallocate(object);
-	}
-	/*
-	 * At this point, our actions above have gained a total of
-	 * one reference to the object, and we have a pager.
-	 */
+	object = vm_pager_allocate(type, handle, objsize, prot, foff);
+	if (object == NULL)
+		return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
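This hunk is the heart of the revision: vm_pager_allocate() now hands back the vm_object_t itself, selected by an objtype_t tag, so the old pager-then-vm_object_lookup() two-step and its reference juggling disappear. A self-contained toy model of the new single-call shape (the struct and enum here are stand-ins, not the kernel's definitions):

#include <stdio.h>
#include <stdlib.h>

typedef enum { OBJT_SWAP, OBJT_DEVICE, OBJT_VNODE } objtype_t;

struct vm_object {
	objtype_t type;
	unsigned long size;
	int ref_count;		/* the caller owns exactly one reference */
};

/* Toy stand-in: allocate the object directly, as the new code does. */
static struct vm_object *
vm_pager_allocate(objtype_t type, void *handle, unsigned long size)
{
	struct vm_object *obj = calloc(1, sizeof(*obj));

	(void)handle;
	if (obj == NULL)
		return (NULL);	/* mapped to EINVAL or ENOMEM in the hunk above */
	obj->type = type;
	obj->size = size;
	obj->ref_count = 1;
	return (obj);
}

int
main(void)
{
	struct vm_object *o = vm_pager_allocate(OBJT_VNODE, NULL, 8192);

	if (o == NULL)
		return (1);
	printf("type %d size %lu refs %d\n", o->type, o->size, o->ref_count);
	free(o);
	return (0);
}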

	/*
	 * Anonymous memory, shared file, or character special file.
	 */
-	if ((flags & (MAP_ANON|MAP_SHARED)) || (type == PG_DEVICE)) {
+	if ((flags & (MAP_ANON|MAP_SHARED)) || (type == OBJT_DEVICE)) {
		rv = vm_map_find(map, object, foff, addr, size, fitit);
		if (rv != KERN_SUCCESS) {
			/*
-			 * Lose the object reference. This will also destroy
-			 * the pager if there are no other references.
+			 * Lose the object reference. Will destroy the
+			 * object if it's an unnamed anonymous mapping
+			 * or named anonymous without other references.
			 */
			vm_object_deallocate(object);
			goto out;
		}
	}
	/*
	 * mmap a COW regular file
	 */
	else {
-		vm_map_t tmap;
-		vm_offset_t off;
		vm_map_entry_t entry;
+		vm_object_t private_object;

-		if (flags & MAP_COPY) {
-			/* locate and allocate the target address space */
-			rv = vm_map_find(map, NULL, 0, addr, size, fitit);
-			if (rv != KERN_SUCCESS) {
-				vm_object_deallocate(object);
-				goto out;
-			}
+		/*
+		 * Create a new object and make the original object
+		 * the backing object. NOTE: the object reference gained
+		 * above is now changed into the reference held by
+		 * private_object. Since we don't map 'object', we want
+		 * only this one reference.
+		 */
+		private_object = vm_object_allocate(OBJT_DEFAULT, object->size);
+		private_object->backing_object = object;
+		TAILQ_INSERT_TAIL(&object->shadow_head,
+		    private_object, shadow_list);

-			off = VM_MIN_ADDRESS;
-			tmap = vm_map_create(NULL, off, off + size, TRUE);
-			rv = vm_map_find(tmap, object, foff, &off, size, FALSE);
-			if (rv != KERN_SUCCESS) {
-				/*
-				 * Deallocate and delete the temporary map.
-				 * Note that since the object insertion
-				 * above has failed, the vm_map_deallocate
-				 * doesn't lose the object reference - we
-				 * must do it explicitly.
-				 */
-				vm_object_deallocate(object);
-				vm_map_deallocate(tmap);
-				goto out;
-			}
-			rv = vm_map_copy(map, tmap, *addr, size, off,
-			    FALSE, FALSE);
-			/*
-			 * Deallocate temporary map. XXX - depending
-			 * on events, this may leave the object with
-			 * no net gain in reference count! ...this
-			 * needs to be looked at!
-			 */
-			vm_map_deallocate(tmap);
-			if (rv != KERN_SUCCESS)
-				goto out;
+		rv = vm_map_find(map, private_object, foff, addr, size, fitit);
+		if (rv != KERN_SUCCESS) {
+			vm_object_deallocate(private_object);
+			goto out;
+		}

-		} else {
-			vm_object_t user_object;
-
-			/*
-			 * Create a new object and make the original object
-			 * the backing object. NOTE: the object reference gained
-			 * above is now changed into the reference held by
-			 * user_object. Since we don't map 'object', we want
-			 * only this one reference.
-			 */
-			user_object = vm_object_allocate(object->size);
-			user_object->shadow = object;
-			TAILQ_INSERT_TAIL(&object->reverse_shadow_head,
-			    user_object, reverse_shadow_list);
-
-			rv = vm_map_find(map, user_object, foff, addr, size, fitit);
-			if( rv != KERN_SUCCESS) {
-				vm_object_deallocate(user_object);
-				goto out;
-			}
-
-			/*
-			 * this is a consistancy check, gets the map entry, and should
-			 * never fail
-			 */
-			if (!vm_map_lookup_entry(map, *addr, &entry)) {
-				panic("vm_mmap: missing map entry!!!");
-			}
-
-			entry->copy_on_write = TRUE;
-		}
+		if (!vm_map_lookup_entry(map, *addr, &entry)) {
+			panic("vm_mmap: missing map entry!!!");
+		}
+		entry->copy_on_write = TRUE;

		/*
		 * set pages COW and protect for read access only
		 */
		vm_object_pmap_copy(object, foff, foff + size);

	}
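The rewritten COW path replaces the old temporary-map dance with a shadow object: private_object holds the private copies, its backing_object pointer leads to the file's object for pages not yet copied, and the backer's shadow_head list lets the VM system find its shadows. A self-contained sketch of just that linkage, using the same <sys/queue.h> macros (field and variable names mirror the diff; the struct is otherwise a stand-in):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct vm_object {
	struct vm_object *backing_object;	/* original, read-only source */
	TAILQ_HEAD(, vm_object) shadow_head;	/* objects shadowing this one */
	TAILQ_ENTRY(vm_object) shadow_list;	/* our link in the backer's list */
	const char *name;
};

static struct vm_object *
object_alloc(const char *name)
{
	struct vm_object *o = calloc(1, sizeof(*o));

	if (o == NULL)
		abort();
	o->name = name;
	TAILQ_INIT(&o->shadow_head);
	return (o);
}

int
main(void)
{
	struct vm_object *object = object_alloc("vnode object");
	struct vm_object *private_object = object_alloc("private COW copy");

	/* Mirrors the new code: the private object shadows the original. */
	private_object->backing_object = object;
	TAILQ_INSERT_TAIL(&object->shadow_head, private_object, shadow_list);

	/* A fault looks in private_object first, then falls back. */
	printf("\"%s\" is backed by \"%s\"\n", private_object->name,
	    private_object->backing_object->name);
	free(private_object);
	free(object);
	return (0);
}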

	/*
	 * "Pre-fault" resident pages.
	 */
-	if ((type == PG_VNODE) && (map->pmap != NULL)) {
+	if ((type == OBJT_VNODE) && (map->pmap != NULL)) {
		pmap_object_init_pt(map->pmap, *addr, object, foff, size);
	}

	/*
	 * Correct protection (default is VM_PROT_ALL). If maxprot is
	 * different than prot, we must set both explicitly.
	 */
	rv = KERN_SUCCESS;

--- 11 unchanged lines hidden ---

	if (flags & MAP_SHARED) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}
out:
-#ifdef DEBUG
-	if (mmapdebug & MDB_MAPIT)
-		printf("vm_mmap: rv %d\n", rv);
-#endif
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}