vm_kern.c (8876) -> vm_kern.c (9507)
Deleted lines are prefixed with "-", added lines with "+"; unchanged context keeps its original line numbers.
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
-64 * $Id: vm_kern.c,v 1.12 1995/03/15 07:52:06 davidg Exp $
+64 * $Id: vm_kern.c,v 1.13 1995/05/30 08:16:04 rgrimes Exp $
65 */
66
67/*
68 * Kernel memory management.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>

--- 98 unchanged lines hidden (view full) ---

171 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
172 * we get the data back from the pager, it will be (very stale)
173 * non-zero data. kmem_alloc is defined to return zero-filled memory.
174 *
175 * We're intentionally not activating the pages we allocate to prevent a
176 * race with page-out. vm_map_pageable will wire the pages.
177 */
178
-179 vm_object_lock(kernel_object);
180 for (i = 0; i < size; i += PAGE_SIZE) {
181 vm_page_t mem;
182
183 while ((mem = vm_page_alloc(kernel_object, offset + i, VM_ALLOC_NORMAL)) == NULL) {
-184 vm_object_unlock(kernel_object);
185 VM_WAIT;
-186 vm_object_lock(kernel_object);
187 }
188 vm_page_zero_fill(mem);
189 mem->flags &= ~PG_BUSY;
190 mem->valid = VM_PAGE_BITS_ALL;
191 }
-192 vm_object_unlock(kernel_object);
193
194 /*
195 * And finally, mark the data as non-pageable.
196 */
197
198 (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
199
200 /*

--- 126 unchanged lines hidden (view full) ---

327 FALSE);
328 vm_map_simplify(map, addr);
329 return (addr);
330 }
331 /*
332 * If we cannot wait then we must allocate all memory up front,
333 * pulling it off the active queue to prevent pageout.
334 */
-335 vm_object_lock(kmem_object);
336 for (i = 0; i < size; i += PAGE_SIZE) {
337 m = vm_page_alloc(kmem_object, offset + i,
338 (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);
339
340 /*
341 * Ran out of space, free everything up and return. Don't need
342 * to lock page queues here as we know that the pages we got
343 * aren't on any queues.
344 */
345 if (m == NULL) {
346 while (i != 0) {
347 i -= PAGE_SIZE;
348 m = vm_page_lookup(kmem_object, offset + i);
349 vm_page_free(m);
350 }
-351 vm_object_unlock(kmem_object);
352 vm_map_delete(map, addr, addr + size);
353 vm_map_unlock(map);
354 return (0);
355 }
356#if 0
357 vm_page_zero_fill(m);
358#endif
359 m->flags &= ~PG_BUSY;
360 m->valid = VM_PAGE_BITS_ALL;
361 }
-362 vm_object_unlock(kmem_object);
363
364 /*
365 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
366 * be able to extend the previous entry so there will be a new entry
367 * exactly corresponding to this address range and it will have
368 * wired_count == 0.
369 */
370 if (!vm_map_lookup_entry(map, addr, &entry) ||
371 entry->start != addr || entry->end != addr + size ||
372 entry->wired_count)
373 panic("kmem_malloc: entry not found or misaligned");
374 entry->wired_count++;
375
376 /*
377 * Loop thru pages, entering them in the pmap. (We cannot add them to
378 * the wired count without wrapping the vm_page_queue_lock in
379 * splimp...)
380 */
381 for (i = 0; i < size; i += PAGE_SIZE) {
-382 vm_object_lock(kmem_object);
383 m = vm_page_lookup(kmem_object, offset + i);
-384 vm_object_unlock(kmem_object);
385 pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
386 }
387 vm_map_unlock(map);
388
389 vm_map_simplify(map, addr);
390 return (addr);
391}
392

--- 21 unchanged lines hidden (view full) ---

414 vm_map_lock(map);
415 if (vm_map_findspace(map, 0, size, &addr) == 0)
416 break;
417 /* no space now; see if we can ever get space */
418 if (vm_map_max(map) - vm_map_min(map) < size) {
419 vm_map_unlock(map);
420 return (0);
421 }
-422 assert_wait((int) map, TRUE);
423 vm_map_unlock(map);
-424 thread_block("kmaw");
+414 tsleep(map, PVM, "kmaw", 0);
425 }
426 vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size);
427 vm_map_unlock(map);
428 return (addr);
429}
430
431/*
432 * kmem_free_wakeup
433 *
-434 * Returns memory to a submap of the kernel, and wakes up any threads
+424 * Returns memory to a submap of the kernel, and wakes up any processes
435 * waiting for memory in that map.
436 */
437void
438kmem_free_wakeup(map, addr, size)
439 vm_map_t map;
440 vm_offset_t addr;
441 vm_size_t size;
442{
443 vm_map_lock(map);
444 (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
-445 thread_wakeup((int) map);
+435 wakeup(map);
446 vm_map_unlock(map);
447}
448
449/*
450 * Create the kernel map; insert a mapping covering kernel text, data, bss,
451 * and all space allocated thus far (`boostrap' data). The new map will thus
452 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
453 * the range between `start' and `end' as free.

--- 16 unchanged lines hidden ---
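Note on the change (illustrative, not part of the file): besides dropping the vm_object_lock()/vm_object_unlock() calls, this revision replaces the Mach-derived assert_wait()/thread_block()/thread_wakeup() calls with the kernel's tsleep()/wakeup() interface. The sketch below shows that idiom in isolation; the names example_space_free, example_wait_for_space, and example_release_space are hypothetical stand-ins for the "submap is full" condition that kmem_alloc_wait() and kmem_free_wakeup() coordinate on.

/*
 * Minimal sketch of the tsleep()/wakeup() idiom -- not part of vm_kern.c.
 * A waiter sleeps on an address (the "wait channel"); a waker calls
 * wakeup() on the same address to wake every sleeper on that channel.
 */
#include <sys/param.h>
#include <sys/systm.h>

static int example_space_free;		/* hypothetical shared condition */

void
example_wait_for_space(void)
{
	while (example_space_free == 0) {
		/*
		 * Sleep at priority PVM until wakeup(&example_space_free)
		 * is called; "exwt" is the wait message reported by ps,
		 * and a timeout of 0 means "sleep until explicitly woken".
		 */
		(void) tsleep(&example_space_free, PVM, "exwt", 0);
	}
	example_space_free--;
}

void
example_release_space(void)
{
	example_space_free++;
	wakeup(&example_space_free);	/* wake every sleeper on the channel */
}

In the diff above the wait channel is the map pointer itself: kmem_alloc_wait() drops the map lock and calls tsleep(map, PVM, "kmaw", 0) when the submap has no space, and kmem_free_wakeup() calls wakeup(map) after vm_map_delete() has returned the space.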