--- vm_kern.c	(revision 1.23, 15722 bytes)
+++ vm_kern.c	(revision 1.24, 15809 bytes)
 /*
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden ---

  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_kern.c,v 1.23 1996/04/24 04:16:44 dyson Exp $
+ * $Id: vm_kern.c,v 1.24 1996/05/10 19:28:54 wollman Exp $
  */

 /*
  * Kernel memory management.
  */

 #include <sys/param.h>
 #include <sys/systm.h>

--- 22 unchanged lines hidden ---

 vm_map_t mb_map;
 int mb_map_full;
 vm_map_t mcl_map;
 int mcl_map_full;
 vm_map_t io_map;
 vm_map_t clean_map;
 vm_map_t phys_map;
 vm_map_t exec_map;
+vm_map_t exech_map;
 vm_map_t u_map;
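
Note: apart from the $Id string, the only change in the declarations is the new submap pointer exech_map. Its initialization and use fall outside this excerpt; the name suggests a map for exec image headers, but the diff itself does not show its purpose.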

 /*
  * kmem_alloc_pageable:
  *
  *	Allocate pageable memory to the kernel's address map.
  *	"map" must be kernel_map or a submap of kernel_map.
  */

--- 211 unchanged lines hidden ---
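
Note: the comment above is the contract for kmem_alloc_pageable(); a hypothetical call, assuming the traditional 4.4BSD-style signature vm_offset_t kmem_alloc_pageable(vm_map_t, vm_size_t), which this excerpt does not show:

	vm_offset_t va = kmem_alloc_pageable(kernel_map, round_page(len));

The region below resumes inside kmem_malloc(): the unchanged lines reserve the address range and back it with kmem_object, and the hunks that follow rework how the physical pages behind that range are allocated and mapped.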

 			panic("kmem_malloc: kmem_map too small");
 		return (0);
 	}
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	vm_object_reference(kmem_object);
 	vm_map_insert(map, kmem_object, offset, addr, addr + size,
 	    VM_PROT_ALL, VM_PROT_ALL, 0);

-	/*
-	 * If we can wait, just mark the range as wired (will fault pages as
-	 * necessary).
-	 */
-	if (waitflag == M_WAITOK) {
-		vm_map_unlock(map);
-		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
-		    FALSE);
-		vm_map_simplify(map, addr);
-		return (addr);
-	}
-	/*
-	 * If we cannot wait then we must allocate all memory up front,
-	 * pulling it off the active queue to prevent pageout.
-	 */
 	for (i = 0; i < size; i += PAGE_SIZE) {
+retry:
 		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
 		    (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

 		/*
 		 * Ran out of space, free everything up and return. Don't need
 		 * to lock page queues here as we know that the pages we got
 		 * aren't on any queues.
 		 */
 		if (m == NULL) {
+			if (waitflag == M_WAITOK) {
+				VM_WAIT;
+				goto retry;
+			}
 			while (i != 0) {
 				i -= PAGE_SIZE;
 				m = vm_page_lookup(kmem_object,
 				    OFF_TO_IDX(offset + i));
 				vm_page_free(m);
 			}
 			vm_map_delete(map, addr, addr + size);
 			vm_map_unlock(map);
 			return (0);
 		}
-		m->flags &= ~(PG_BUSY|PG_ZERO);
+		m->flags &= ~PG_ZERO;
 		m->valid = VM_PAGE_BITS_ALL;
 	}

 	/*
 	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
 	 * be able to extend the previous entry so there will be a new entry
 	 * exactly corresponding to this address range and it will have
 	 * wired_count == 0.

--- 7 unchanged lines hidden ---
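
Note: the hunk above replaces kmem_malloc()'s two-strategy scheme (M_WAITOK callers got a wired range via vm_map_pageable() that faulted pages in on demand; M_NOWAIT callers were populated up front) with a single up-front loop: every page is allocated before the map is unlocked, and when vm_page_alloc() fails an M_WAITOK caller now sleeps in VM_WAIT and retries the same page index instead of failing. A minimal userland sketch of that discipline; try_alloc_page() and wait_for_memory() are hypothetical stand-ins for vm_page_alloc() and VM_WAIT, and the unwind mirrors the vm_page_lookup()/vm_page_free() loop:

	#include <stdlib.h>

	#define	NPAGES	8

	static void *try_alloc_page(void) { return (malloc(4096)); }	/* may fail */
	static void wait_for_memory(void) { }	/* stand-in for VM_WAIT */

	/*
	 * Allocate every page up front: on failure, sleep and retry the
	 * same slot if the caller may wait, otherwise unwind the partial
	 * allocation and report failure.
	 */
	static int
	alloc_all(void *pages[NPAGES], int canwait)
	{
		int i;

		for (i = 0; i < NPAGES; i++) {
	retry:
			pages[i] = try_alloc_page();
			if (pages[i] == NULL) {
				if (canwait) {
					wait_for_memory();
					goto retry;	/* same slot again */
				}
				while (i != 0)	/* free what we already hold */
					free(pages[--i]);
				return (0);
			}
		}
		return (1);
	}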

 	/*
 	 * Loop thru pages, entering them in the pmap. (We cannot add them to
 	 * the wired count without wrapping the vm_page_queue_lock in
 	 * splimp...)
 	 */
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		vm_page_wire(m);
-		pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
+		PAGE_WAKEUP(m);
+		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
+		    VM_PROT_ALL, 1);
 	}
 	vm_map_unlock(map);

 	vm_map_simplify(map, addr);
 	return (addr);
 }

 /*

--- 75 unchanged lines hidden ---
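
Note: two smaller hunks above change how each page is published. The allocation loop now clears only PG_ZERO and leaves PG_BUSY set until after vm_page_wire(), releasing it with PAGE_WAKEUP() once the wired mapping is in place, so the page stays busy until it is fully set up; and pmap_kenter() is replaced by pmap_enter(kernel_pmap, ..., VM_PROT_ALL, 1), which, on a 1996-era pmap (a hedged reading, not something the diff states), installs a managed, wired mapping instead of an unmanaged one.

For callers the visible change is the M_WAITOK case: the demand-faulting fast path through vm_map_pageable() is gone, and a sleeping caller now blocks in VM_WAIT inside the allocation loop. A hypothetical caller sketch, assuming the signature vm_offset_t kmem_malloc(vm_map_t, vm_size_t, int) implied by the waitflag tests above; len and the error handling are illustrative:

	vm_offset_t va;

	/* Interrupt-time caller: must not sleep; 0 means no pages or no space. */
	va = kmem_malloc(mb_map, round_page(len), M_NOWAIT);
	if (va == 0)
		return (ENOMEM);

	/*
	 * Sleeping caller: after this revision, blocks in VM_WAIT until pages
	 * are free, so 0 no longer signals a transient page shortage.
	 */
	va = kmem_malloc(kmem_map, round_page(len), M_WAITOK);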