Lines Matching refs:vm

89 #include <vm/vm.h>
90 #include <vm/vm_param.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vnode_pager.h>
100 #include <vm/swap_pager.h>
101 #include <vm/uma.h>
261 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
292 struct vmspace *vm;
295 vm = (struct vmspace *)mem;
296 map = &vm->vm_map;
299 mtx_init(&map->system_mtx, "vm map (system)", NULL,
301 sx_init(&map->lock, "vm map (user)");
302 PMAP_LOCK_INIT(vmspace_pmap(vm));
310 struct vmspace *vm;
312 vm = (struct vmspace *)mem;
313 KASSERT(vm->vm_map.nentries == 0,
314 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
315 KASSERT(vm->vm_map.size == 0,
316 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
327 struct vmspace *vm;
329 vm = uma_zalloc(vmspace_zone, M_WAITOK);
330 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
331 if (!pinit(vmspace_pmap(vm))) {
332 uma_zfree(vmspace_zone, vm);
335 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
336 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
337 refcount_init(&vm->vm_refcnt, 1);
338 vm->vm_shm = NULL;
339 vm->vm_swrss = 0;
340 vm->vm_tsize = 0;
341 vm->vm_dsize = 0;
342 vm->vm_ssize = 0;
343 vm->vm_taddr = 0;
344 vm->vm_daddr = 0;
345 vm->vm_maxsaddr = 0;
346 return (vm);
365 vmspace_dofree(struct vmspace *vm)
368 CTR1(KTR_VM, "vmspace_free: %p", vm);
374 shmexit(vm);
381 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
382 vm_map_max(&vm->vm_map));
384 pmap_release(vmspace_pmap(vm));
385 vm->vm_map.pmap = NULL;
386 uma_zfree(vmspace_zone, vm);
390 vmspace_free(struct vmspace *vm)
396 if (refcount_release(&vm->vm_refcnt))
397 vmspace_dofree(vm);
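
Lines 390-397 are the common release path: every vmspace starts with its count at 1 (line 337), callers drop their reference through vmspace_free(), and only the caller that releases the last reference runs vmspace_dofree() (365-386). A minimal userland sketch of that last-reference-frees pattern, using C11 atomics instead of FreeBSD's refcount(9); the refcnt_* and vmspace_like names are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vmspace_like {
	atomic_uint	refcnt;
};

static bool
refcnt_release(atomic_uint *cnt)
{
	/* fetch_sub returns the old value, so "== 1" means we dropped 1 -> 0. */
	return (atomic_fetch_sub(cnt, 1) == 1);
}

static void
vmspace_like_free(struct vmspace_like *vm)
{
	if (refcnt_release(&vm->refcnt)) {
		/* Last reference: this is where the real teardown would run. */
		printf("freeing %p\n", (void *)vm);
		free(vm);
	}
}

int
main(void)
{
	struct vmspace_like *vm = calloc(1, sizeof(*vm));

	if (vm == NULL)
		return (1);
	atomic_store(&vm->refcnt, 1);	/* as refcount_init() does at line 337 */
	vmspace_like_free(vm);		/* 1 -> 0, object is torn down */
	return (0);
}
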
403 struct vmspace *vm;
406 vm = p->p_vmspace;
409 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
410 vmspace_free(vm);
416 struct vmspace *vm;
421 vm = p->p_vmspace;
431 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
438 released = refcount_release(&vm->vm_refcnt);
445 if (p->p_vmspace != vm) {
447 p->p_vmspace = vm;
451 pmap_remove_pages(vmspace_pmap(vm));
456 vmspace_dofree(vm);
469 struct vmspace *vm;
472 vm = p->p_vmspace;
473 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
477 if (vm != p->p_vmspace) {
479 vmspace_free(vm);
483 return (vm);
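
The acquire path at 469-483 (vmspace_acquire_ref()) must not hand out a reference to a vmspace whose count has already hit zero, so it bumps the count only if it is still non-zero and then re-checks that p->p_vmspace did not change underneath it, dropping the new reference with vmspace_free() if it did. A hedged userland sketch of the acquire-if-not-zero step, written as a C11 compare-and-swap loop rather than FreeBSD's refcount(9) helper:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool
refcnt_acquire_if_not_zero(atomic_uint *cnt)
{
	unsigned old = atomic_load(cnt);

	do {
		if (old == 0)
			return (false);	/* object is already on its way to the free path */
	} while (!atomic_compare_exchange_weak(cnt, &old, old + 1));
	return (true);
}

int
main(void)
{
	atomic_uint live = 2, dying = 0;

	/* Succeeds on a live object, refuses to resurrect a dead one. */
	printf("live=%d dying=%d\n",
	    refcnt_acquire_if_not_zero(&live),
	    refcnt_acquire_if_not_zero(&dying));
	return (0);
}
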
638 &enable_vmmap_check, 0, "Enable vm map consistency checking");
916 mtx_init(&map->system_mtx, "vm map (system)", NULL,
918 sx_init(&map->lock, "vm map (user)");
4591 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we successfully grow the stack.
4599 struct vmspace *vm;
4614 vm = p->p_vmspace;
4667 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4669 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4676 ctob(vm->vm_ssize) + grow_amount)) {
4687 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4689 ctob(vm->vm_ssize);
4696 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4697 grow_amount = limit - ctob(vm->vm_ssize);
4795 vm->vm_ssize += btoc(grow_amount);
4818 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
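
In the growstack fragments (4667-4818), vm_ssize is accounted in pages while stacklim, limit, and the racct stack value are in bytes, so every comparison converts with ctob() and the growth is booked back with btoc(). A small userland demo of that arithmetic; the 4 KiB page size and the macro bodies below are illustrative stand-ins for the kernel's machine/param.h definitions:

#include <assert.h>
#include <stdio.h>

#define	PAGE_SIZE	4096UL
#define	ctob(x)		((x) * PAGE_SIZE)			/* pages ("clicks") to bytes */
#define	btoc(x)		(((x) + PAGE_SIZE - 1) / PAGE_SIZE)	/* bytes to pages, rounded up */

int
main(void)
{
	unsigned long vm_ssize = 16;		/* current stack size, in pages */
	unsigned long stacklim = 512 * 1024;	/* stack rlimit, in bytes */
	unsigned long grow_amount = 64 * 1024;	/* requested growth, in bytes */

	/* Same clamp as the checks around 4687-4697: never grow past the byte limit. */
	if (ctob(vm_ssize) + grow_amount > stacklim)
		grow_amount = stacklim - ctob(vm_ssize);

	/* Same accounting as line 4795: book the growth back in pages. */
	vm_ssize += btoc(grow_amount);
	assert(ctob(vm_ssize) <= stacklim);
	printf("stack: %lu pages (%lu bytes)\n", vm_ssize, ctob(vm_ssize));
	return (0);
}
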