--- vm_object.c	1995/06/04 13:53:25	1.47.2.1
+++ vm_object.c	1995/06/11 19:31:53	1.48
 /*
  * Copyright (c) 1991, 1993
  *    The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden ---

  * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  * School of Computer Science
  * Carnegie Mellon University
  * Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.47.2.1 1995/06/04 13:53:25 davidg Exp $
+ * $Id: vm_object.c,v 1.48 1995/06/11 19:31:53 rgrimes Exp $
  */
 
 /*
  * Virtual memory object module.
  */
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>       /* for curproc, pageproc */
 #include <sys/malloc.h>
 #include <sys/vnode.h>
 #include <sys/mount.h>
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
 #include <vm/swap_pager.h>
+#include <vm/vnode_pager.h>
 #include <vm/vm_kern.h>
 
-static void _vm_object_allocate(vm_size_t, vm_object_t);
+static void _vm_object_allocate(objtype_t, vm_size_t, vm_object_t);
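
The prototype change above is the crux of this whole revision: an object no longer points at a vm_pager_t but carries a type tag plus an opaque handle, and pager behavior is selected by object->type instead of object->pager->pg_type. A minimal sketch of what this implies for the object structure -- hedged: only OBJT_DEFAULT, OBJT_SWAP and OBJT_VNODE, and the three fields shown, are actually visible in this diff:

/*
 * Sketch only, not the real vm_object.h.  OBJT_DEFAULT is anonymous
 * memory with no backing store allocated yet, OBJT_SWAP is swap-backed,
 * OBJT_VNODE is file-backed.
 */
typedef enum objtype { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE } objtype_t;

struct vm_object_sketch {
    objtype_t type;     /* replaces pager->pg_type dispatch */
    void *handle;       /* a struct vnode * when type == OBJT_VNODE */
    void *pg_data;      /* pager-private data, formerly pager->pg_data */
};

Correspondingly, the old code's "is this anonymous memory?" test, (flags & OBJ_INTERNAL), becomes handle == NULL && (type == OBJT_DEFAULT || type == OBJT_SWAP) throughout the hunks below.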
 
 
 /*
  * Virtual memory objects maintain the actual data
  * associated with allocated virtual memory.  A given
  * page of memory exists within exactly one object.
  *
  * An object is only deallocated when all "references"

--- 18 unchanged lines hidden ---

  */
 
 
 struct vm_object kernel_object_store;
 struct vm_object kmem_object_store;
 
 int vm_object_cache_max;
 
-#define VM_OBJECT_HASH_COUNT    1021
-
-struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
-#define OBJECT_HASH(pager) ((unsigned long)(pager) % VM_OBJECT_HASH_COUNT)
-
 long object_collapses;
 long object_bypasses;
 
 static void
-_vm_object_allocate(size, object)
+_vm_object_allocate(type, size, object)
+    objtype_t type;
     vm_size_t size;
     register vm_object_t object;
 {
     TAILQ_INIT(&object->memq);
-    TAILQ_INIT(&object->reverse_shadow_head);
+    TAILQ_INIT(&object->shadow_head);
 
+    object->type = type;
     object->size = size;
     object->ref_count = 1;
-    vm_object_lock_init(object);
-    object->flags = OBJ_INTERNAL;   /* pager will reset */
+    object->flags = 0;
     object->paging_in_progress = 0;
     object->resident_page_count = 0;
-
-    object->pager = NULL;
+    object->pg_data = NULL;
+    object->handle = NULL;
     object->paging_offset = 0;
-    object->shadow = NULL;
-    object->shadow_offset = (vm_offset_t) 0;
-    object->copy = NULL;
+    object->backing_object = NULL;
+    object->backing_object_offset = (vm_offset_t) 0;
 
     object->last_read = 0;
 
-    simple_lock(&vm_object_list_lock);
     TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
     vm_object_count++;
-    simple_unlock(&vm_object_list_lock);
 }
 
 /*
  *    vm_object_init:
  *
  *    Initialize the VM objects module.
  */
 void
 vm_object_init(vm_offset_t nothing)
 {
     register int i;
 
     TAILQ_INIT(&vm_object_cached_list);
     TAILQ_INIT(&vm_object_list);
     vm_object_count = 0;
-    simple_lock_init(&vm_cache_lock);
-    simple_lock_init(&vm_object_list_lock);
 
     vm_object_cache_max = 84;
     if (cnt.v_page_count > 1000)
         vm_object_cache_max += (cnt.v_page_count - 1000) / 4;
 
-    for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
-        LIST_INIT(&vm_object_hashtable[i]);
-
     kernel_object = &kernel_object_store;
-    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+    _vm_object_allocate(OBJT_DEFAULT, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
         kernel_object);
 
     kmem_object = &kmem_object_store;
-    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+    _vm_object_allocate(OBJT_DEFAULT, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
         kmem_object);
 }
 
 /*
  *    vm_object_allocate:
  *
  *    Returns a new object with the given size.
  */
 
 vm_object_t
-vm_object_allocate(size)
+vm_object_allocate(type, size)
+    objtype_t type;
     vm_size_t size;
 {
     register vm_object_t result;
 
     result = (vm_object_t)
         malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);
 
 
-    _vm_object_allocate(size, result);
+    _vm_object_allocate(type, size, result);
 
     return (result);
 }
 
 
 /*
  *    vm_object_reference:
  *
  *    Gets another reference to the given object.
  */
 inline void
 vm_object_reference(object)
     register vm_object_t object;
 {
     if (object == NULL)
         return;
 
-    vm_object_lock(object);
+    if (object->ref_count == 0) {
+        if ((object->flags & OBJ_CANPERSIST) == 0)
+            panic("vm_object_reference: non-persistent object with 0 ref_count");
+        TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
+        vm_object_cached--;
+    }
     object->ref_count++;
-    vm_object_unlock(object);
 }
 
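vm_object_reference now also takes over cache reactivation from the deleted vm_object_lookup: a persistent object parked on vm_object_cached_list sits at ref_count 0 (the cache holds no counted reference), so the first new reference must unlink it. The lifecycle this implies, condensed into a usage sketch:

/* ref 0 -> 1: unlinked from the LRU cache by the hunk above */
vm_object_reference(object);
/* ... object in use ... */
/* ref 1 -> 0: vm_object_deallocate() below re-caches rather than
 * destroys the object while OBJ_CANPERSIST is set and pages remain. */
vm_object_deallocate(object);
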
 /*
  *    vm_object_deallocate:
  *
  *    Release a reference to the specified object,
  *    gained either through a vm_object_allocate
  *    or a vm_object_reference call.  When all references
  *    are gone, storage associated with this object
  *    may be relinquished.
  *
  *    No object may be locked.
  */
 void
 vm_object_deallocate(object)
     vm_object_t object;
 {
     vm_object_t temp;
-    vm_pager_t pager;
 
     while (object != NULL) {
 
         if (object->ref_count == 0)
             panic("vm_object_deallocate: object deallocated too many times");
 
         /*
-         * The cache holds a reference (uncounted) to the object; we
-         * must lock it before removing the object.
-         */
-
-        vm_object_cache_lock();
-
-        /*
          * Lose the reference
          */
-        vm_object_lock(object);
-
         object->ref_count--;
 
         if (object->ref_count != 0) {
             if ((object->ref_count == 1) &&
-                (object->flags & OBJ_INTERNAL)) {
+                (object->handle == NULL) &&
+                (object->type == OBJT_DEFAULT ||
+                 object->type == OBJT_SWAP)) {
                 vm_object_t robject;
-                robject = object->reverse_shadow_head.tqh_first;
+                robject = object->shadow_head.tqh_first;
                 if ((robject != NULL) &&
-                    (robject->flags & OBJ_INTERNAL)) {
+                    (robject->handle == NULL) &&
+                    (robject->type == OBJT_DEFAULT ||
+                     robject->type == OBJT_SWAP)) {
                     int s;
                     robject->ref_count += 2;
                     object->ref_count += 2;
 
                     do {
                         s = splhigh();
                         while (robject->paging_in_progress) {
                             robject->flags |= OBJ_PIPWNT;

--- 6 unchanged lines hidden ---

                         }
                         splx(s);
 
                     } while( object->paging_in_progress || robject->paging_in_progress);
 
                     object->ref_count -= 2;
                     robject->ref_count -= 2;
                     if( robject->ref_count == 0) {
-                        vm_object_unlock(object);
-                        vm_object_cache_unlock();
                         robject->ref_count += 1;
                         object = robject;
                         continue;
                     }
-                    vm_object_cache_unlock();
-                    vm_object_unlock(object);
-                    vm_object_lock(robject);
                     vm_object_collapse(robject);
                     return;
                 }
             }
-            vm_object_unlock(object);
             /*
              * If there are still references, then we are done.
              */
-            vm_object_cache_unlock();
             return;
         }
 
-        pager = object->pager;
+        if (object->type == OBJT_VNODE) {
+            struct vnode *vp = object->handle;
 
-        if (pager && pager->pg_type == PG_VNODE) {
-            vn_pager_t vnp = (vn_pager_t) pager->pg_data;
-
-            vnp->vnp_vp->v_flag &= ~VTEXT;
+            vp->v_flag &= ~VTEXT;
         }
 
         /*
          * See if this object can persist and has some resident
          * pages.  If so, enter it in the cache.
          */
         if (object->flags & OBJ_CANPERSIST) {
             if (object->resident_page_count != 0) {
-                vm_object_page_clean(object, 0, 0 ,TRUE);
+                vm_object_page_clean(object, 0, 0 ,TRUE, TRUE);
                 TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
                     cached_list);
                 vm_object_cached++;
-                vm_object_cache_unlock();
 
-                vm_object_unlock(object);
-
                 vm_object_cache_trim();
                 return;
             } else {
                 object->flags &= ~OBJ_CANPERSIST;
             }
         }
 
         /*
-         * Make sure no one can look us up now.
+         * Make sure no one uses us.
          */
         object->flags |= OBJ_DEAD;
-        if ((object->flags & OBJ_INTERNAL) == 0)
-            vm_object_remove(pager);
-        vm_object_cache_unlock();
 
-        temp = object->shadow;
+        temp = object->backing_object;
         if (temp)
-            TAILQ_REMOVE(&temp->reverse_shadow_head, object, reverse_shadow_list);
+            TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
         vm_object_terminate(object);
         /* unlocks and deallocates object */
         object = temp;
     }
 }
 
 /*
  *    vm_object_terminate actually destroys the specified object, freeing
  *    up all previously used resources.
  *
  *    The object must be locked.
  */
 void
 vm_object_terminate(object)
     register vm_object_t object;
 {
     register vm_page_t p, next;
-    vm_object_t shadow_object;
+    vm_object_t backing_object;
     int s;
 
     /*
-     * Detach the object from its shadow if we are the shadow's copy.
-     */
-    if ((shadow_object = object->shadow) != NULL) {
-        vm_object_lock(shadow_object);
-        if (shadow_object->copy == object)
-            shadow_object->copy = NULL;
-        vm_object_unlock(shadow_object);
-    }
-
-    /*
      * wait for the pageout daemon to be done with the object
      */
     s = splhigh();
     while (object->paging_in_progress) {
-        vm_object_unlock(object);
         object->flags |= OBJ_PIPWNT;
-        tsleep((caddr_t) object, PVM, "objtrm", 0);
-        vm_object_lock(object);
+        tsleep(object, PVM, "objtrm", 0);
     }
     splx(s);
 
     if (object->paging_in_progress != 0)
         panic("vm_object_deallocate: pageout in progress");
 
     /*
      * Clean and free the pages, as appropriate. All references to the
      * object are gone, so we don't need to lock it.
      */
-    if (object->pager && (object->pager->pg_type == PG_VNODE)) {
-        vn_pager_t vnp = object->pager->pg_data;
-        struct vnode *vp;
+    if (object->type == OBJT_VNODE) {
+        struct vnode *vp = object->handle;
 
-        vp = vnp->vnp_vp;
         VOP_LOCK(vp);
-        (void) _vm_object_page_clean(object, 0, 0, TRUE);
+        vm_object_page_clean(object, 0, 0, TRUE, FALSE);
         vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
         VOP_UNLOCK(vp);
     }
 
     /*
      * Now free the pages. For internal objects, this also removes them
      * from paging queues.
      */
     while ((p = object->memq.tqh_first) != NULL) {
-        VM_PAGE_CHECK(p);
-        vm_page_lock_queues();
         if (p->flags & PG_BUSY)
             printf("vm_object_terminate: freeing busy page\n");
         PAGE_WAKEUP(p);
         vm_page_free(p);
         cnt.v_pfree++;
-        vm_page_unlock_queues();
     }
-    vm_object_unlock(object);
 
     /*
      * Let the pager know object is dead.
      */
-    if (object->pager != NULL)
-        vm_pager_deallocate(object->pager);
+    vm_pager_deallocate(object);
 
-    simple_lock(&vm_object_list_lock);
     TAILQ_REMOVE(&vm_object_list, object, object_list);
     vm_object_count--;
-    simple_unlock(&vm_object_list_lock);
 
     wakeup(object);
 
     /*
      * Free the space for the object.
      */
     free((caddr_t) object, M_VMOBJ);
 }
 
--- 5 unchanged lines hidden ---
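
Both vm_object_deallocate and vm_object_terminate now wait out in-flight pageouts with the OBJ_PIPWNT handshake instead of the deleted object lock. The waiter half is in the hunks above; the waker half lives in vm_object_pip_wakeup(), whose body is not in this diff, so the second half below is inferred from its call sites:

/* Waiter, at splhigh() to close the test-then-sleep race: */
s = splhigh();
while (object->paging_in_progress) {
    object->flags |= OBJ_PIPWNT;      /* "wake me when PIP reaches 0" */
    tsleep(object, PVM, "objtrm", 0); /* sleep on the object's address */
}
splx(s);

/* Waker -- assumed shape of vm_object_pip_wakeup(): */
if (--object->paging_in_progress == 0 && (object->flags & OBJ_PIPWNT)) {
    object->flags &= ~OBJ_PIPWNT;
    wakeup(object);
}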
 
  * Leaves page on whatever queue it is currently on.
  *
  * Odd semantics: if start == end, we clean everything.
  *
  * The object must be locked.
  */
 
 void
-_vm_object_page_clean(object, start, end, syncio)
+vm_object_page_clean(object, start, end, syncio, lockflag)
     vm_object_t object;
     vm_offset_t start;
     vm_offset_t end;
     boolean_t syncio;
+    boolean_t lockflag;
 {
     register vm_page_t p;
     register vm_offset_t tstart, tend;
     int pass;
     int pgcount, s;
     int allclean;
     int entireobj;
+    struct vnode *vp;
 
-    if (object->pager == NULL || (object->flags & OBJ_WRITEABLE) == 0)
+    if (object->type != OBJT_VNODE || (object->flags & OBJ_WRITEABLE) == 0)
         return;
 
+    vp = object->handle;
+
+    if (lockflag)
+        VOP_LOCK(vp);
+
     if (start != end) {
         start = trunc_page(start);
         end = round_page(end);
     }
 
     pass = 0;
 startover:
     tstart = start;
     if (end == 0) {
         tend = object->size;
     } else {
         tend = end;
     }
     entireobj = 0;
     if (tstart == 0 && tend == object->size) {
         object->flags &= ~OBJ_WRITEABLE;
         entireobj = 1;
     }
-    /*
-     * Wait until potential collapse operation is complete
-     */
-    if (object->flags & OBJ_INTERNAL) {
-        s = splhigh();
-        while (object->paging_in_progress) {
-            object->flags |= OBJ_PIPWNT;
-            tsleep(object, PVM, "objpcw", 0);
-        }
-        splx(s);
-    }
 
     pgcount = object->resident_page_count;
 
     if (pass == 0 &&
         (pgcount < 128 || pgcount > (object->size / (8 * PAGE_SIZE)))) {
         allclean = 1;
         for(; pgcount && (tstart < tend); tstart += PAGE_SIZE) {
             p = vm_page_lookup(object, tstart);

--- 21 unchanged lines hidden ---

                 }
                 allclean = 0;
             }
         }
         if (!allclean) {
             pass = 1;
             goto startover;
         }
+        if (lockflag)
+            VOP_UNLOCK(vp);
         return;
     }
 
     allclean = 1;
     while ((p = object->memq.tqh_first) != NULL && pgcount > 0) {
 
         if (p->flags & PG_CACHE) {
             goto donext;

--- 40 unchanged lines hidden ---

     }
     if ((!allclean && (pass == 0)) ||
         (entireobj && (object->flags & OBJ_WRITEABLE))) {
         pass = 1;
         if (entireobj)
             object->flags &= ~OBJ_WRITEABLE;
         goto startover;
     }
+    if (lockflag)
+        VOP_UNLOCK(vp);
     return;
 }
 
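The old two-level arrangement -- a locking vm_object_page_clean() wrapper around _vm_object_page_clean(), deleted in the next hunk -- is folded into one function; the new lockflag argument says whether the cleaner should take the vnode lock itself. The two call sites in this diff show the convention:

/* Caller holds no vnode lock: let the cleaner VOP_LOCK/VOP_UNLOCK. */
vm_object_page_clean(object, 0, 0, TRUE, TRUE);   /* vm_object_deallocate */

/* Caller already did VOP_LOCK(vp): do not relock. */
vm_object_page_clean(object, 0, 0, TRUE, FALSE);  /* vm_object_terminate */
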
-
-void
-vm_object_page_clean(object, start, end, syncio)
-    register vm_object_t object;
-    register vm_offset_t start;
-    register vm_offset_t end;
-    boolean_t syncio;
-{
-    if (object->pager && (object->flags & OBJ_WRITEABLE) &&
-        (object->pager->pg_type == PG_VNODE)) {
-        vn_pager_t vnp = (vn_pager_t) object->pager->pg_data;
-        struct vnode *vp;
-
-        vp = vnp->vnp_vp;
-        VOP_LOCK(vp);
-        _vm_object_page_clean(object, start, end, syncio);
-        VOP_UNLOCK(vp);
-    } else {
-        _vm_object_page_clean(object, start, end, syncio);
-    }
-}
-
-void
-vm_object_cache_clean()
-{
-    vm_object_t object;
-    vm_object_cache_lock();
-    while(1) {
-        object = vm_object_cached_list.tqh_first;
-        while( object) {
-            if( (object->flags & OBJ_WRITEABLE) &&
-                object->pager &&
-                object->pager->pg_type == PG_VNODE) {
-                vm_object_page_clean(object, 0, 0, 0);
-                goto loop;
-            }
-            object = object->cached_list.tqe_next;
-        }
-        return;
-loop:
-    }
-}
 
 /*
  *    vm_object_deactivate_pages
  *
  *    Deactivate all pages in the specified object.  (Keep its pages
  *    in memory even though it is no longer referenced.)
  *
  *    The object must be locked.
  */
 void
 vm_object_deactivate_pages(object)
     register vm_object_t object;
 {
     register vm_page_t p, next;
 
     for (p = object->memq.tqh_first; p != NULL; p = next) {
         next = p->listq.tqe_next;
-        vm_page_lock_queues();
         vm_page_deactivate(p);
-        vm_page_unlock_queues();
     }
 }
 
 /*
  *    Trim the object cache to size.
  */
 void
 vm_object_cache_trim()
 {
     register vm_object_t object;
 
-    vm_object_cache_lock();
     while (vm_object_cached > vm_object_cache_max) {
         object = vm_object_cached_list.tqh_first;
-        vm_object_cache_unlock();
 
-        if (object != vm_object_lookup(object->pager))
-            panic("vm_object_cache_trim: I'm sooo confused.");
-
+        vm_object_reference(object);
         pager_cache(object, FALSE);
-
-        vm_object_cache_lock();
     }
-    vm_object_cache_unlock();
 }
 
 
 /*
  *    vm_object_pmap_copy:
  *
  *    Makes all physical pages in the specified
  *    object range copy-on-write.  No writeable

--- 7 unchanged lines hidden ---

     register vm_offset_t start;
     register vm_offset_t end;
 {
     register vm_page_t p;
 
     if (object == NULL)
         return;
 
-    vm_object_lock(object);
     for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
         if ((start <= p->offset) && (p->offset < end)) {
             vm_page_protect(p, VM_PROT_READ);
             p->flags |= PG_COPYONWRITE;
         }
     }
-    vm_object_unlock(object);
 }
 
 /*
  *    vm_object_pmap_remove:
  *
  *    Removes all physical pages in the specified
  *    object range from all physical maps.
  *

--- 7 unchanged lines hidden ---

 {
     register vm_page_t p;
     int s;
 
     if (object == NULL)
         return;
     ++object->paging_in_progress;
 
-    vm_object_lock(object);
 again:
     for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
         if ((start <= p->offset) && (p->offset < end)) {
             s = splhigh();
             if ((p->flags & PG_BUSY) || p->busy) {
                 p->flags |= PG_WANTED;
-                tsleep((caddr_t) p, PVM, "vmopmr", 0);
+                tsleep(p, PVM, "vmopmr", 0);
                 splx(s);
                 goto again;
             }
             splx(s);
             vm_page_protect(p, VM_PROT_NONE);
         }
     }
-    vm_object_unlock(object);
     vm_object_pip_wakeup(object);
 }
 
 /*
  *    vm_object_copy:
  *
  *    Create a new object which is a copy of an existing
  *    object, and mark all of the pages in the existing

--- 23 unchanged lines hidden ---

     /*
      * Nothing to copy
      */
     *dst_object = NULL;
     *dst_offset = 0;
     *src_needs_copy = FALSE;
     return;
     }
-    /*
-     * If the object's pager is null_pager or the default pager, we don't
-     * have to make a copy of it. Instead, we set the needs copy flag and
-     * make a shadow later.
-     */
 
-    vm_object_lock(src_object);
-
     /*
      * Try to collapse the object before copying it.
      */
+    if (src_object->handle == NULL &&
+        (src_object->type == OBJT_DEFAULT ||
+         src_object->type == OBJT_SWAP))
+        vm_object_collapse(src_object);
 
-    vm_object_collapse(src_object);
 
-    if (src_object->pager == NULL ||
-        (src_object->flags & OBJ_INTERNAL)) {
-
-        /*
-         * Make another reference to the object
-         */
-        src_object->ref_count++;
-
-        /*
-         * Mark all of the pages copy-on-write.
-         */
-        for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
-            if (src_offset <= p->offset &&
-                p->offset < src_offset + size)
-                p->flags |= PG_COPYONWRITE;
-        vm_object_unlock(src_object);
-
-        *dst_object = src_object;
-        *dst_offset = src_offset;
-
-        /*
-         * Must make a shadow when write is desired
-         */
-        *src_needs_copy = TRUE;
-        return;
-    }
     /*
-     * If the object has a pager, the pager wants to see all of the
-     * changes. We need a copy-object for the changed pages.
-     *
-     * If there is a copy-object, and it is empty, no changes have been made
-     * to the object since the copy-object was made. We can use the same
-     * copy- object.
+     * Make another reference to the object
      */
-
-Retry1:
-    old_copy = src_object->copy;
-    if (old_copy != NULL) {
-        /*
-         * Try to get the locks (out of order)
-         */
-        if (!vm_object_lock_try(old_copy)) {
-            vm_object_unlock(src_object);
-
-            /* should spin a bit here... */
-            tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
-            vm_object_lock(src_object);
-            goto Retry1;
-        }
-        if (old_copy->resident_page_count == 0 &&
-            old_copy->pager == NULL) {
-            /*
-             * Return another reference to the existing
-             * copy-object.
-             */
-            old_copy->ref_count++;
-            vm_object_unlock(old_copy);
-            vm_object_unlock(src_object);
-            *dst_object = old_copy;
-            *dst_offset = src_offset;
-            *src_needs_copy = FALSE;
-            return;
-        }
-        vm_object_unlock(old_copy);
-    }
-    vm_object_unlock(src_object);
-
-    /*
-     * If the object has a pager, the pager wants to see all of the
-     * changes. We must make a copy-object and put the changed pages
-     * there.
-     *
-     * The copy-object is always made large enough to completely shadow the
-     * original object, since it may have several users who want to shadow
-     * the original object at different points.
-     */
-
-    new_copy = vm_object_allocate(src_object->size);
-
-Retry2:
-    vm_object_lock(src_object);
-    /*
-     * Copy object may have changed while we were unlocked
-     */
-    old_copy = src_object->copy;
-    if (old_copy != NULL) {
-        /*
-         * Try to get the locks (out of order)
-         */
-        if (!vm_object_lock_try(old_copy)) {
-            vm_object_unlock(src_object);
-            tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
-            goto Retry2;
-        }
-        /*
-         * Consistency check
-         */
-        if (old_copy->shadow != src_object ||
-            old_copy->shadow_offset != (vm_offset_t) 0)
-            panic("vm_object_copy: copy/shadow inconsistency");
-
-        /*
-         * Make the old copy-object shadow the new one. It will
-         * receive no more pages from the original object.
-         */
-
-        src_object->ref_count--;    /* remove ref. from old_copy */
-        if (old_copy->shadow)
-            TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
-        old_copy->shadow = new_copy;
-        TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
-        new_copy->ref_count++;    /* locking not needed - we have the
-                                   * only pointer */
-        vm_object_unlock(old_copy);    /* done with old_copy */
-    }
-    new_start = (vm_offset_t) 0;    /* always shadow original at 0 */
-    new_end = (vm_offset_t) new_copy->size;    /* for the whole object */
-
-    /*
-     * Point the new copy at the existing object.
-     */
-
-    new_copy->shadow = src_object;
-    TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy, reverse_shadow_list);
-    new_copy->shadow_offset = new_start;
     src_object->ref_count++;
-    src_object->copy = new_copy;
 
     /*
-     * Mark all the affected pages of the existing object copy-on-write.
+     * Mark all of the pages copy-on-write.
      */
-    for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
-        if ((new_start <= p->offset) && (p->offset < new_end))
+    for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
+        if (src_offset <= p->offset &&
+            p->offset < src_offset + size)
             p->flags |= PG_COPYONWRITE;
 
-    vm_object_unlock(src_object);
+    *dst_object = src_object;
+    *dst_offset = src_offset;
 
-    *dst_object = new_copy;
-    *dst_offset = src_offset - new_start;
-    *src_needs_copy = FALSE;
+    /*
+     * Must make a shadow when write is desired
+     */
+    *src_needs_copy = TRUE;
+    return;
 }
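
The rewrite above gives vm_object_copy a much simpler contract: it no longer manufactures a copy-object, it always hands back the source object with its pages marked copy-on-write and *src_needs_copy = TRUE, leaving the caller to interpose a shadow before any write. A sketch of the caller-side pattern this implies (the real callers are elsewhere in the VM system, not in this file):

vm_object_t dst_object;
vm_offset_t dst_offset;
boolean_t src_needs_copy;

vm_object_copy(src_object, src_offset, size,
    &dst_object, &dst_offset, &src_needs_copy);
if (src_needs_copy)
    /* push a shadow object in front before any write (see below) */
    vm_object_shadow(&dst_object, &dst_offset, size);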
 
 /*
  *    vm_object_shadow:
  *
  *    Create a new object which is backed by the
  *    specified existing object range.  The source
  *    object reference is deallocated.

--- 12 unchanged lines hidden ---

     register vm_object_t result;
 
     source = *object;
 
     /*
      * Allocate a new object with the given length
      */
 
-    if ((result = vm_object_allocate(length)) == NULL)
+    if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
         panic("vm_object_shadow: no object for shadowing");
 
     /*
      * The new object shadows the source object, adding a reference to it.
      * Our caller changes his reference to point to the new object,
      * removing a reference to the source object.  Net result: no change
      * of reference count.
      */
-    result->shadow = source;
+    result->backing_object = source;
     if (source)
-        TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head, result, reverse_shadow_list);
+        TAILQ_INSERT_TAIL(&result->backing_object->shadow_head, result, shadow_list);
 
     /*
      * Store the offset into the source object, and fix up the offset into
      * the new object.
      */
 
-    result->shadow_offset = *offset;
+    result->backing_object_offset = *offset;
 
     /*
      * Return the new things
      */
 
     *offset = 0;
     *object = result;
 }
 
 
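The renaming in vm_object_shadow (and throughout the file) is easy to misread, since both old and new names involve "shadow": the old object->shadow pointer, which pointed at the object *behind* this one, becomes backing_object, and the old reverse_shadow_head list of objects in front becomes shadow_head. In the new vocabulary:

/* Linkage after this change (names taken from the hunks above): */
object->backing_object;         /* object consulted on a page miss      */
object->backing_object_offset;  /* our window into the backing object   */
object->shadow_head;            /* objects shadowing us (in front)      */
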
 /*
- * vm_object_lookup looks in the object cache for an object with the
- * specified pager and paging id.
- */
-
-vm_object_t
-vm_object_lookup(pager)
-    vm_pager_t pager;
-{
-    register vm_object_hash_entry_t entry;
-    vm_object_t object;
-
-    vm_object_cache_lock();
-
-    for (entry = vm_object_hashtable[OBJECT_HASH(pager)].lh_first;
-        entry != NULL; entry = entry->hash_links.le_next) {
-        object = entry->object;
-        if (object->pager == pager) {
-            vm_object_lock(object);
-            if (object->ref_count == 0) {
-                TAILQ_REMOVE(&vm_object_cached_list, object,
-                    cached_list);
-                vm_object_cached--;
-            }
-            object->ref_count++;
-            vm_object_unlock(object);
-            vm_object_cache_unlock();
-            return (object);
-        }
-    }
-
-    vm_object_cache_unlock();
-    return (NULL);
-}
-
-/*
- * vm_object_enter enters the specified object/pager/id into
- * the hash table.
- */
-
-void
-vm_object_enter(object, pager)
-    vm_object_t object;
-    vm_pager_t pager;
-{
-    struct vm_object_hash_head *bucket;
-    register vm_object_hash_entry_t entry;
-
-    /*
-     * We don't cache null objects, and we can't cache objects with the
-     * null pager.
-     */
-
-    if (object == NULL)
-        return;
-    if (pager == NULL)
-        return;
-
-    bucket = &vm_object_hashtable[OBJECT_HASH(pager)];
-    entry = (vm_object_hash_entry_t)
-        malloc((u_long) sizeof *entry, M_VMOBJHASH, M_WAITOK);
-    entry->object = object;
-
-    vm_object_cache_lock();
-    LIST_INSERT_HEAD(bucket, entry, hash_links);
-    vm_object_cache_unlock();
-}
-
-/*
- * vm_object_remove:
- *
- *    Remove the pager from the hash table.
- *    Note:  This assumes that the object cache
- *    is locked.  XXX this should be fixed
- *    by reorganizing vm_object_deallocate.
- */
-void
-vm_object_remove(pager)
-    register vm_pager_t pager;
-{
-    struct vm_object_hash_head *bucket;
-    register vm_object_hash_entry_t entry;
-    register vm_object_t object;
-
-    bucket = &vm_object_hashtable[OBJECT_HASH(pager)];
-
-    for (entry = bucket->lh_first;
-        entry != NULL; entry = entry->hash_links.le_next) {
-        object = entry->object;
-        if (object->pager == pager) {
-            LIST_REMOVE(entry, hash_links);
-            free((caddr_t) entry, M_VMOBJHASH);
-            break;
-        }
-    }
-}
-
-/*
  * this version of collapse allows the operation to occur earlier and
  * when paging_in_progress is true for an object...  This is not a complete
  * operation, but should plug 99.9% of the rest of the leaks.
  */
 static void
 vm_object_qcollapse(object)
     register vm_object_t object;
 {
     register vm_object_t backing_object;
     register vm_offset_t backing_offset, new_offset;
     register vm_page_t p, pp;
     register vm_size_t size;
 
-    backing_object = object->shadow;
-    if (backing_object->shadow != NULL &&
-        backing_object->shadow->copy == backing_object)
-        return;
+    backing_object = object->backing_object;
     if (backing_object->ref_count != 1)
         return;
 
     backing_object->ref_count += 2;
 
-    backing_offset = object->shadow_offset;
+    backing_offset = object->backing_object_offset;
     size = object->size;
     p = backing_object->memq.tqh_first;
     while (p) {
         vm_page_t next;
 
         next = p->listq.tqe_next;
         if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
             !p->valid || p->hold_count || p->wire_count || p->busy) {
             p = next;
             continue;
         }
         vm_page_protect(p, VM_PROT_NONE);
         new_offset = (p->offset - backing_offset);
         if (p->offset < backing_offset ||
             new_offset >= size) {
-            if (backing_object->pager)
-                swap_pager_freespace(backing_object->pager,
+            if (backing_object->type == OBJT_SWAP)
+                swap_pager_freespace(backing_object,
                     backing_object->paging_offset + p->offset, PAGE_SIZE);
-            vm_page_lock_queues();
             vm_page_free(p);
-            vm_page_unlock_queues();
         } else {
             pp = vm_page_lookup(object, new_offset);
-            if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
-                object->paging_offset + new_offset))) {
-                if (backing_object->pager)
-                    swap_pager_freespace(backing_object->pager,
+            if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
+                object->paging_offset + new_offset, NULL, NULL))) {
+                if (backing_object->type == OBJT_SWAP)
+                    swap_pager_freespace(backing_object,
                         backing_object->paging_offset + p->offset, PAGE_SIZE);
-                vm_page_lock_queues();
                 vm_page_free(p);
-                vm_page_unlock_queues();
             } else {
-                if( backing_object->pager)
-                    swap_pager_freespace(backing_object->pager,
+                if (backing_object->type == OBJT_SWAP)
+                    swap_pager_freespace(backing_object,
                         backing_object->paging_offset + p->offset, PAGE_SIZE);
                 vm_page_rename(p, object, new_offset);
                 p->dirty = VM_PAGE_BITS_ALL;
             }
         }
         p = next;
     }
     backing_object->ref_count -= 2;
 }
 
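Note the call-convention change running through vm_object_qcollapse and the rest of the file: pager entry points now take the vm_object itself rather than a vm_pager_t, and vm_pager_has_page gains two trailing arguments, always NULL here (presumably optional out-parameters, e.g. for clustering; this diff never uses them):

/* Old style: dispatch through the per-object pager pointer. */
if (backing_object->pager)
    swap_pager_freespace(backing_object->pager, off, PAGE_SIZE);
(void) vm_pager_has_page(object->pager, off);

/* New style: the object is the handle; type tests replace pg_type. */
if (backing_object->type == OBJT_SWAP)
    swap_pager_freespace(backing_object, off, PAGE_SIZE);
(void) vm_pager_has_page(object, off, NULL, NULL);
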
-boolean_t vm_object_collapse_allowed = TRUE;
-
 /*
  *    vm_object_collapse:
  *
  *    Collapse an object with the object backing it.
  *    Pages in the backing object are moved into the
  *    parent, and the backing object is deallocated.
- *
- *    Requires that the object be locked and the page
- *    queues be unlocked.
- *
- *    This routine has significant changes by John S. Dyson
- *    to fix some swap memory leaks.  18 Dec 93
- *
  */
 void
 vm_object_collapse(object)
-    register vm_object_t object;
+    vm_object_t object;
 
 {
-    register vm_object_t backing_object;
-    register vm_offset_t backing_offset;
-    register vm_size_t size;
-    register vm_offset_t new_offset;
-    register vm_page_t p, pp;
+    vm_object_t backing_object;
+    vm_offset_t backing_offset;
+    vm_size_t size;
+    vm_offset_t new_offset;
+    vm_page_t p, pp;
 
-    if (!vm_object_collapse_allowed)
-        return;
-
     while (TRUE) {
         /*
          * Verify that the conditions are right for collapse:
          *
          * The object exists and no pages in it are currently being paged
          * out.
          */
         if (object == NULL)
             return;
 
         /*
          * Make sure there is a backing object.
          */
-        if ((backing_object = object->shadow) == NULL)
+        if ((backing_object = object->backing_object) == NULL)
             return;
 
         /*
          * we check the backing object first, because it is most likely
-         * !OBJ_INTERNAL.
+         * not collapsable.
          */
-        if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
+        if (backing_object->handle != NULL ||
+            (backing_object->type != OBJT_DEFAULT &&
+             backing_object->type != OBJT_SWAP) ||
             (backing_object->flags & OBJ_DEAD) ||
-            (object->flags & OBJ_INTERNAL) == 0 ||
-            (object->flags & OBJ_DEAD))
+            object->handle != NULL ||
+            (object->type != OBJT_DEFAULT &&
+             object->type != OBJT_SWAP) ||
+            (object->flags & OBJ_DEAD)) {
             return;
+        }
 
         if (object->paging_in_progress != 0 ||
             backing_object->paging_in_progress != 0) {
-            if (vm_object_lock_try(backing_object)) {
-                vm_object_qcollapse(object);
-                vm_object_unlock(backing_object);
-            }
+            vm_object_qcollapse(object);
             return;
         }
 
-        vm_object_lock(backing_object);
-
         /*
-         * The backing object can't be a copy-object: the
-         * shadow_offset for the copy-object must stay as 0.
-         * Furthermore (for the 'we have all the pages' case), if we
-         * bypass backing_object and just shadow the next object in
-         * the chain, old pages from that object would then have to be
-         * copied BOTH into the (former) backing_object and into the
-         * parent object.
-         */
-        if (backing_object->shadow != NULL &&
-            backing_object->shadow->copy == backing_object) {
-            vm_object_unlock(backing_object);
-            return;
-        }
-
-        /*
          * We know that we can either collapse the backing object (if
          * the parent is the only reference to it) or (perhaps) remove
          * the parent's reference to it.
          */
 
-        backing_offset = object->shadow_offset;
+        backing_offset = object->backing_object_offset;
         size = object->size;
 
         /*
          * If there is exactly one reference to the backing object, we
          * can collapse it into the parent.
          */
 
         if (backing_object->ref_count == 1) {

--- 17 unchanged lines hidden ---

                  * page falls outside the parent, dispose of
                  * it.
                  *
                  * Otherwise, move it as planned.
                  */
 
                 if (p->offset < backing_offset ||
                     new_offset >= size) {
-                    vm_page_lock_queues();
                     vm_page_protect(p, VM_PROT_NONE);
                     PAGE_WAKEUP(p);
                     vm_page_free(p);
-                    vm_page_unlock_queues();
                 } else {
                     pp = vm_page_lookup(object, new_offset);
-                    if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
-                        object->paging_offset + new_offset))) {
-                        vm_page_lock_queues();
+                    if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
+                        object->paging_offset + new_offset, NULL, NULL))) {
                         vm_page_protect(p, VM_PROT_NONE);
                         PAGE_WAKEUP(p);
                         vm_page_free(p);
-                        vm_page_unlock_queues();
                     } else {
                         vm_page_rename(p, object, new_offset);
                     }
                 }
             }
 
             /*
              * Move the pager from backing_object to object.
              */
 
-            if (backing_object->pager) {
+            if (backing_object->type == OBJT_SWAP) {
                 backing_object->paging_in_progress++;
-                if (object->pager) {
-                    vm_pager_t bopager;
-
+                if (object->type == OBJT_SWAP) {
                     object->paging_in_progress++;
                     /*
                      * copy shadow object pages into ours
                      * and destroy unneeded pages in
                      * shadow object.
                      */
-                    bopager = backing_object->pager;
-                    backing_object->pager = NULL;
                     swap_pager_copy(
-                        bopager, backing_object->paging_offset,
-                        object->pager, object->paging_offset,
-                        object->shadow_offset);
+                        backing_object, backing_object->paging_offset,
+                        object, object->paging_offset,
+                        object->backing_object_offset);
                     vm_object_pip_wakeup(object);
                 } else {
+                    extern struct pagerlst swap_pager_un_object_list;
+
                     object->paging_in_progress++;
                     /*
-                     * grab the shadow objects pager
+                     * move the shadow backing_object's pager data to
+                     * "object" and convert "object" type to OBJT_SWAP.
                      */
-                    object->pager = backing_object->pager;
+                    object->type = OBJT_SWAP;
+                    object->pg_data = backing_object->pg_data;
                     object->paging_offset = backing_object->paging_offset + backing_offset;
-                    backing_object->pager = NULL;
+                    TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);
+
                     /*
+                     * Convert backing object from OBJT_SWAP to
+                     * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
+                     * actually necessary.
+                     */
+                    backing_object->type = OBJT_DEFAULT;
+                    backing_object->pg_data = NULL;
+                    TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
+                    /*
                      * free unnecessary blocks
                      */
-                    swap_pager_freespace(object->pager, 0, object->paging_offset);
+                    swap_pager_freespace(object, 0, object->paging_offset);
                     vm_object_pip_wakeup(object);
                 }
 
                 vm_object_pip_wakeup(backing_object);
             }
             /*
              * Object now shadows whatever backing_object did.
-             * Note that the reference to backing_object->shadow
+             * Note that the reference to backing_object->backing_object
              * moves from within backing_object to within object.
              */
 
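When the parent object has no swap state of its own, the collapse no longer transfers a vm_pager_t; the hunk above converts the parent to OBJT_SWAP in place, adopts backing_object's pg_data, and swaps the two objects' entries on swap_pager_un_object_list (apparently the swap pager's list of handle-less objects it must track itself; the name is taken from the hunk). Stripped of that bookkeeping, the in-place exchange is:

/* Parent adopts the backing object's swap bookkeeping... */
object->type = OBJT_SWAP;
object->pg_data = backing_object->pg_data;
object->paging_offset = backing_object->paging_offset + backing_offset;

/* ...and the backer reverts to plain anonymous memory, so the
 * vm_pager_deallocate() in vm_object_terminate() finds nothing to free. */
backing_object->type = OBJT_DEFAULT;
backing_object->pg_data = NULL;
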
-            TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object,
-                reverse_shadow_list);
-            if (backing_object->shadow)
-                TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head,
-                    backing_object, reverse_shadow_list);
-            object->shadow = backing_object->shadow;
-            if (object->shadow)
-                TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
-                    object, reverse_shadow_list);
+            TAILQ_REMOVE(&object->backing_object->shadow_head, object,
+                shadow_list);
+            if (backing_object->backing_object)
+                TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
+                    backing_object, shadow_list);
+            object->backing_object = backing_object->backing_object;
+            if (object->backing_object)
+                TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
+                    object, shadow_list);
 
-            object->shadow_offset += backing_object->shadow_offset;
+            object->backing_object_offset += backing_object->backing_object_offset;
             /*
              * Discard backing_object.
              *
              * Since the backing object has no pages, no pager left,
              * and no object references within it, all that is
              * necessary is to dispose of it.
              */
 
-            vm_object_unlock(backing_object);
-
-            simple_lock(&vm_object_list_lock);
             TAILQ_REMOVE(&vm_object_list, backing_object,
                 object_list);
             vm_object_count--;
-            simple_unlock(&vm_object_list_lock);
 
             free((caddr_t) backing_object, M_VMOBJ);
 
             object_collapses++;
         } else {
             /*
              * If all of the pages in the backing object are
              * shadowed by the parent object, the parent object no
              * longer has to shadow the backing object; it can
              * shadow the next one in the chain.
              *
              * The backing object must not be paged out - we'd have
              * to check all of the paged-out pages, as well.
              */
 
-            if (backing_object->pager != NULL) {
-                vm_object_unlock(backing_object);
+            if (backing_object->type != OBJT_DEFAULT) {
                 return;
             }
             /*
              * Should have a check for a 'small' number of pages
              * here.
              */
 
             for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
                 new_offset = (p->offset - backing_offset);
 
                 /*
                  * If the parent has a page here, or if this
                  * page falls outside the parent, keep going.
                  *
                  * Otherwise, the backing_object must be left in
                  * the chain.
                  */
 
-                if (p->offset >= backing_offset &&
-                    new_offset <= size &&
-                    ((pp = vm_page_lookup(object, new_offset)) == NULL ||
-                        !pp->valid) &&
-                    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
-                    /*
-                     * Page still needed. Can't go any
-                     * further.
-                     */
-                    vm_object_unlock(backing_object);
-                    return;
+                if (p->offset >= backing_offset && new_offset <= size) {
+
+                    pp = vm_page_lookup(object, new_offset);
+
+                    if ((pp == NULL || pp->valid == 0) &&
+                        !vm_pager_has_page(object, object->paging_offset + new_offset, NULL, NULL)) {
+
+                        /*
+                         * Page still needed. Can't go any
+                         * further.
+                         */
+                        return;
+                    }
                 }
             }
1437
1438 /*
1439 * Make the parent shadow the next object in the
1440 * chain. Deallocating backing_object will not remove
1441 * it, since its reference count is at least 2.
1442 */
1443
1077 }
1078 }
1079
1080 /*
1081 * Make the parent shadow the next object in the
1082 * chain. Deallocating backing_object will not remove
1083 * it, since its reference count is at least 2.
1084 */
1085
1444 TAILQ_REMOVE(&object->shadow->reverse_shadow_head,
1445 object, reverse_shadow_list);
1446 vm_object_reference(object->shadow = backing_object->shadow);
1447 if (object->shadow)
1448 TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
1449 object, reverse_shadow_list);
1450 object->shadow_offset += backing_object->shadow_offset;
1086 TAILQ_REMOVE(&object->backing_object->shadow_head,
1087 object, shadow_list);
1088 vm_object_reference(object->backing_object = backing_object->backing_object);
1089 if (object->backing_object)
1090 TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
1091 object, shadow_list);
1092 object->backing_object_offset += backing_object->backing_object_offset;
1451
1452 /*
1093
1094 /*
1453 * Backing object might have had a copy pointer to us.
1454 * If it did, clear it.
1455 */
1456 if (backing_object->copy == object) {
1457 backing_object->copy = NULL;
1458 }
1459 /*
1460 * Drop the reference count on backing_object. Since
1461 * its ref_count was at least 2, it will not vanish;
1462 * so we don't need to call vm_object_deallocate.
1463 */
1464 if (backing_object->ref_count == 1)
1465 printf("should have called obj deallocate\n");
1466 backing_object->ref_count--;
1095 * Drop the reference count on backing_object. Since
1096 * its ref_count was at least 2, it will not vanish;
1097 * so we don't need to call vm_object_deallocate.
1098 */
1099 if (backing_object->ref_count == 1)
1100 printf("should have called obj deallocate\n");
1101 backing_object->ref_count--;
1467 vm_object_unlock(backing_object);
1468
1469 object_bypasses++;
1470
1471 }
1472
1473 /*
1474 * Try again with this object's new backing object.
1475 */

--- 33 unchanged lines hidden (view full) ---

1509 if ((start <= p->offset) && (p->offset < end)) {
1510 s = splhigh();
1511 if (p->bmapped) {
1512 splx(s);
1513 continue;
1514 }
1515 if ((p->flags & PG_BUSY) || p->busy) {
1516 p->flags |= PG_WANTED;
1102
1103 object_bypasses++;
1104
1105 }
1106
1107 /*
1108 * Try again with this object's new backing object.
1109 */
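/*
 * The scan above boils down to one predicate per backing page.  A
 * compilable sketch of that test, with invented stand-ins
 * (parent_has_resident, parent_pager_has) for vm_page_lookup() and
 * vm_pager_has_page(): a page blocks the bypass only when it falls
 * inside the parent's window and the parent cannot supply that offset
 * itself.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_off;

/* Stubs standing in for a valid resident page / pager-backed page. */
static bool parent_has_resident(vm_off off) { return (off == 0x1000); }
static bool parent_pager_has(vm_off off) { (void)off; return (false); }

static bool
page_blocks_bypass(vm_off p_off, vm_off backing_offset, vm_off size)
{
        vm_off new_off;

        if (p_off < backing_offset)     /* below the parent's window */
                return (false);
        new_off = p_off - backing_offset;
        if (new_off > size)             /* beyond the parent's window */
                return (false);
        /* Parent can't supply the data itself: page still needed. */
        return (!parent_has_resident(new_off) && !parent_pager_has(new_off));
}

int
main(void)
{
        /* Parent window starts at 0x2000 and is 0x4000 long. */
        printf("0x3000 blocks bypass: %d\n",
            page_blocks_bypass(0x3000, 0x2000, 0x4000));
        printf("0x4000 blocks bypass: %d\n",
            page_blocks_bypass(0x4000, 0x2000, 0x4000));
        return (0);
}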

--- 33 unchanged lines hidden (view full) ---

1143 if ((start <= p->offset) && (p->offset < end)) {
1144 s = splhigh();
1145 if (p->bmapped) {
1146 splx(s);
1147 continue;
1148 }
1149 if ((p->flags & PG_BUSY) || p->busy) {
1150 p->flags |= PG_WANTED;
1517 tsleep((caddr_t) p, PVM, "vmopar", 0);
1151 tsleep(p, PVM, "vmopar", 0);
1518 splx(s);
1519 goto again;
1520 }
1521 splx(s);
1522 if (clean_only) {
1523 vm_page_test_dirty(p);
1524 if (p->valid & p->dirty)
1525 continue;
1526 }
1527 vm_page_protect(p, VM_PROT_NONE);
1152 splx(s);
1153 goto again;
1154 }
1155 splx(s);
1156 if (clean_only) {
1157 vm_page_test_dirty(p);
1158 if (p->valid & p->dirty)
1159 continue;
1160 }
1161 vm_page_protect(p, VM_PROT_NONE);
1528 vm_page_lock_queues();
1529 PAGE_WAKEUP(p);
1530 vm_page_free(p);
1162 PAGE_WAKEUP(p);
1163 vm_page_free(p);
1531 vm_page_unlock_queues();
1532 }
1533 }
1534 } else {
1535 while (size > 0) {
1536 while ((p = vm_page_lookup(object, start)) != 0) {
1537 s = splhigh();
1538 if (p->bmapped) {
1539 splx(s);
1540 break;
1541 }
1542 if ((p->flags & PG_BUSY) || p->busy) {
1543 p->flags |= PG_WANTED;
1164 }
1165 }
1166 } else {
1167 while (size > 0) {
1168 while ((p = vm_page_lookup(object, start)) != 0) {
1169 s = splhigh();
1170 if (p->bmapped) {
1171 splx(s);
1172 break;
1173 }
1174 if ((p->flags & PG_BUSY) || p->busy) {
1175 p->flags |= PG_WANTED;
1544 tsleep((caddr_t) p, PVM, "vmopar", 0);
1176 tsleep(p, PVM, "vmopar", 0);
1545 splx(s);
1546 goto again;
1547 }
1548 splx(s);
1549 if (clean_only) {
1550 vm_page_test_dirty(p);
1551 if (p->valid & p->dirty)
1552 continue;
1553 }
1554 vm_page_protect(p, VM_PROT_NONE);
1177 splx(s);
1178 goto again;
1179 }
1180 splx(s);
1181 if (clean_only) {
1182 vm_page_test_dirty(p);
1183 if (p->valid & p->dirty)
1184 continue;
1185 }
1186 vm_page_protect(p, VM_PROT_NONE);
1555 vm_page_lock_queues();
1556 PAGE_WAKEUP(p);
1557 vm_page_free(p);
1187 PAGE_WAKEUP(p);
1188 vm_page_free(p);
1558 vm_page_unlock_queues();
1559 }
1560 start += PAGE_SIZE;
1561 size -= PAGE_SIZE;
1562 }
1563 }
1564 vm_object_pip_wakeup(object);
1565}
1566

--- 31 unchanged lines hidden (view full) ---

1598 vm_size_t newsize;
1599
1600 if (next_object != NULL) {
1601 return (FALSE);
1602 }
1603 if (prev_object == NULL) {
1604 return (TRUE);
1605 }
1189 }
1190 start += PAGE_SIZE;
1191 size -= PAGE_SIZE;
1192 }
1193 }
1194 vm_object_pip_wakeup(object);
1195}
1196
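/*
 * Userland analogue of the PG_WANTED / tsleep() dance above, under the
 * assumption that a condition variable models the kernel sleep point.
 * All names here (toy_page, page_mtx, wait_until_idle, page_release)
 * are inventions for the sketch, not kernel interfaces.  The key shape
 * is the same: a remover that finds the page busy marks it wanted,
 * sleeps, and restarts its scan, because the list may have changed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct toy_page {
        bool busy;      /* PG_BUSY / p->busy */
        bool wanted;    /* PG_WANTED */
};

static pthread_mutex_t page_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t page_cv = PTHREAD_COND_INITIALIZER;
static struct toy_page pg = { true, false };

/* Called with page_mtx held; returns with it held and the page idle. */
static void
wait_until_idle(struct toy_page *p)
{
again:
        if (p->busy) {
                p->wanted = true;                       /* PG_WANTED */
                pthread_cond_wait(&page_cv, &page_mtx); /* tsleep() */
                goto again;                             /* rescan */
        }
}

/* The busy side: clear busy and wake any sleepers (PAGE_WAKEUP). */
static void
page_release(struct toy_page *p)
{
        pthread_mutex_lock(&page_mtx);
        p->busy = false;
        if (p->wanted) {
                p->wanted = false;
                pthread_cond_broadcast(&page_cv);
        }
        pthread_mutex_unlock(&page_mtx);
}

static void *
owner(void *arg)
{
        (void)arg;
        usleep(10000);          /* hold the page busy briefly */
        page_release(&pg);
        return (NULL);
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, owner, NULL);
        pthread_mutex_lock(&page_mtx);
        wait_until_idle(&pg);   /* blocks until owner releases */
        pthread_mutex_unlock(&page_mtx);
        pthread_join(t, NULL);
        printf("page idle; safe to free\n");
        return (0);
}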

--- 31 unchanged lines hidden (view full) ---

1228 vm_size_t newsize;
1229
1230 if (next_object != NULL) {
1231 return (FALSE);
1232 }
1233 if (prev_object == NULL) {
1234 return (TRUE);
1235 }
1606 vm_object_lock(prev_object);
1607
1608 /*
1609 * Try to collapse the object first
1610 */
1611 vm_object_collapse(prev_object);
1612
1613 /*
1614 * Can't coalesce if: more than one reference; paged out; shadows
1615 * another object; or has a copy elsewhere (any of which mean that
1616 * the pages not mapped to prev_entry may be in use anyway)
1617 */
1618
1619 if (prev_object->ref_count > 1 ||
1236
1237 /*
1238 * Try to collapse the object first
1239 */
1240 vm_object_collapse(prev_object);
1241
1242 /*
1243 * Can't coalesce if: more than one reference; not a default object
1244 * (it may have paged-out or file-backed pages); or shadows another
1245 * object (any of which mean that the pages not mapped to prev_entry
1246 * may be in use anyway)
1246 */
1247
1248 if (prev_object->ref_count > 1 ||
1620 prev_object->pager != NULL ||
1621 prev_object->shadow != NULL ||
1622 prev_object->copy != NULL) {
1623 vm_object_unlock(prev_object);
1249 prev_object->type != OBJT_DEFAULT ||
1250 prev_object->backing_object != NULL) {
1624 return (FALSE);
1625 }
1626 /*
1627 * Remove any pages that may still be in the object from a previous
1628 * deallocation.
1629 */
1630
1631 vm_object_page_remove(prev_object,
1632 prev_offset + prev_size,
1633 prev_offset + prev_size + next_size, FALSE);
1634
1635 /*
1636 * Extend the object if necessary.
1637 */
1638 newsize = prev_offset + prev_size + next_size;
1639 if (newsize > prev_object->size)
1640 prev_object->size = newsize;
1641
1251 return (FALSE);
1252 }
1253 /*
1254 * Remove any pages that may still be in the object from a previous
1255 * deallocation.
1256 */
1257
1258 vm_object_page_remove(prev_object,
1259 prev_offset + prev_size,
1260 prev_offset + prev_size + next_size, FALSE);
1261
1262 /*
1263 * Extend the object if necessary.
1264 */
1265 newsize = prev_offset + prev_size + next_size;
1266 if (newsize > prev_object->size)
1267 prev_object->size = newsize;
1268
1642 vm_object_unlock(prev_object);
1643 return (TRUE);
1644}
1645
1646/*
1647 * Returns the page after looking it up in the shadow chain.
1648 */
1649
1650vm_page_t
1651vm_object_page_lookup(object, offset)
1652 vm_object_t object;
1653 vm_offset_t offset;
1654{
1655 vm_page_t m;
1656
1657 if (!(m = vm_page_lookup(object, offset))) {
1269 return (TRUE);
1270}
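/*
 * The coalesce path above is just a guard plus size arithmetic.  A
 * compilable sketch under simplified assumptions (struct toy_obj and
 * try_coalesce are invented names; the page removal step is elided):
 * refuse unless the object is an unshared, unshadowed default object,
 * then grow it so that prev_offset + prev_size + next_size fits.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_obj {
        int ref_count;
        bool is_default;        /* OBJT_DEFAULT: anonymous, not paged out */
        bool has_backing;       /* shadows another object */
        unsigned long size;
};

static bool
try_coalesce(struct toy_obj *prev, unsigned long prev_offset,
    unsigned long prev_size, unsigned long next_size)
{
        unsigned long newsize;

        /* Any of these means pages beyond prev_size may be in use. */
        if (prev->ref_count > 1 || !prev->is_default || prev->has_backing)
                return (false);

        /* (The real code purges leftover pages in the new range here.) */
        newsize = prev_offset + prev_size + next_size;
        if (newsize > prev->size)
                prev->size = newsize;
        return (true);
}

int
main(void)
{
        struct toy_obj o = { 1, true, false, 0x8000 };

        if (try_coalesce(&o, 0x4000, 0x3000, 0x2000))
                printf("coalesced; object size now 0x%lx\n", o.size);
        return (0);
}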
1271
1272/*
1273 * Returns the page after looking it up in the backing object chain.
1274 */
1275
1276vm_page_t
1277vm_object_page_lookup(object, offset)
1278 vm_object_t object;
1279 vm_offset_t offset;
1280{
1281 vm_page_t m;
1282
1283 if (!(m = vm_page_lookup(object, offset))) {
1658 if (!object->shadow)
1284 if (!object->backing_object)
1659 return 0;
1660 else
1285 return 0;
1286 else
1661 return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
1287 return vm_object_page_lookup(object->backing_object, offset + object->backing_object_offset);
1662 }
1663 return m;
1664}
1665
1666#ifdef DDB
1667
1668int
1669_vm_object_in_map(map, object, entry)

--- 24 unchanged lines hidden (view full) ---

1694 entcount = tmpm->nentries;
1695 while (entcount-- && tmpe != &tmpm->header) {
1696 if( _vm_object_in_map(tmpm, object, tmpe)) {
1697 return 1;
1698 }
1699 tmpe = tmpe->next;
1700 }
1701 } else if ((obj = entry->object.vm_object) != NULL) {
1288 }
1289 return m;
1290}
1291
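/*
 * The recursion above is a tail call, so the same walk can be written
 * as a loop: descend the chain, translating the offset at each hop,
 * until a resident page turns up or the chain ends.  The types here
 * (struct tobj, struct tpage, tpage_lookup) are inventions standing in
 * for vm_object / vm_page / vm_page_lookup().
 */
#include <stddef.h>
#include <stdio.h>

struct tpage { int id; };
struct tobj {
        struct tobj *backing_object;
        unsigned long backing_offset;
        unsigned long page_offset;      /* the one offset this object holds */
        struct tpage page;
};

/* Toy vm_page_lookup(): each object has one resident page. */
static struct tpage *
tpage_lookup(struct tobj *o, unsigned long offset)
{
        return (offset == o->page_offset ? &o->page : NULL);
}

/* Iterative form of the shadow-chain lookup. */
static struct tpage *
chain_page_lookup(struct tobj *object, unsigned long offset)
{
        struct tpage *m;

        for (; object != NULL; offset += object->backing_offset,
            object = object->backing_object)
                if ((m = tpage_lookup(object, offset)) != NULL)
                        return (m);
        return (NULL);
}

int
main(void)
{
        struct tobj c = { NULL, 0, 0x3000, { 3 } };
        struct tobj b = { &c, 0x2000, 0x9999, { 2 } };
        struct tobj a = { &b, 0x1000, 0x9999, { 1 } };
        struct tpage *m = chain_page_lookup(&a, 0);

        printf("found page id %d\n", m != NULL ? m->id : -1);
        return (0);
}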
1292#ifdef DDB
1293
1294int
1295_vm_object_in_map(map, object, entry)

--- 24 unchanged lines hidden (view full) ---

1320 entcount = tmpm->nentries;
1321 while (entcount-- && tmpe != &tmpm->header) {
1322 if( _vm_object_in_map(tmpm, object, tmpe)) {
1323 return 1;
1324 }
1325 tmpe = tmpe->next;
1326 }
1327 } else if ((obj = entry->object.vm_object) != NULL) {
1702 for(; obj; obj=obj->shadow)
1328 for(; obj; obj=obj->backing_object)
1703 if( obj == object) {
1704 return 1;
1705 }
1706 }
1707 return 0;
1708}
1709
1710int

--- 32 unchanged lines hidden (view full) ---

1743}
1744
1745
1746void
1747vm_object_check() {
1748 int i;
1749 int maxhash = 0;
1750 vm_object_t object;
1329 if( obj == object) {
1330 return 1;
1331 }
1332 }
1333 return 0;
1334}
1335
1336int

--- 32 unchanged lines hidden (view full) ---

1369}
1370
1371
1372void
1373vm_object_check() {
1374 int i;
1375 int maxhash = 0;
1376 vm_object_t object;
1751 vm_object_hash_entry_t entry;
1752
1753 /*
1377
1378 /*
1754 * Make sure that no internal objects are hashed.
1755 */
1756 for (i=0; i<VM_OBJECT_HASH_COUNT;i++) {
1757 int lsize = 0;
1758 for (entry = vm_object_hashtable[i].lh_first;
1759 entry != NULL; entry = entry->hash_links.le_next) {
1760 if( entry->object->flags & OBJ_INTERNAL) {
1761 printf("vmochk: internal obj on hash: size: %d\n", entry->object->size);
1762 }
1763 ++lsize;
1764 }
1765 if( lsize > maxhash)
1766 maxhash = lsize;
1767 }
1768
1769 printf("maximum object hash queue size: %d\n", maxhash);
1770
1771 /*
1772 * Make sure that internal objects are in a map somewhere
1773 * and that none have zero reference counts.
1774 */
1775 for (object = vm_object_list.tqh_first;
1776 object != NULL;
1777 object = object->object_list.tqe_next) {
1379 * Make sure that internal objects are in a map somewhere
1380 * and that none have zero reference counts.
1381 */
1382 for (object = vm_object_list.tqh_first;
1383 object != NULL;
1384 object = object->object_list.tqe_next) {
1778 if (object->flags & OBJ_INTERNAL) {
1385 if (object->handle == NULL &&
1386 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1779 if (object->ref_count == 0) {
1780 printf("vmochk: internal obj has zero ref count: %d\n",
1781 object->size);
1782 }
1783 if (!vm_object_in_map(object)) {
1387 if (object->ref_count == 0) {
1388 printf("vmochk: internal obj has zero ref count: %d\n",
1389 object->size);
1390 }
1391 if (!vm_object_in_map(object)) {
1784 printf("vmochk: internal obj is not in a map: ref: %d, size: %d, pager: 0x%x, shadow: 0x%x\n",
1785 object->ref_count, object->size, object->pager, object->shadow);
1392 printf("vmochk: internal obj is not in a map: ref: %d, size: %d, pg_data: 0x%x, backing_object: 0x%x\n",
1393 object->ref_count, object->size, object->pg_data, object->backing_object);
1786 }
1787 }
1788 }
1789}
1790
1791/*
1792 * vm_object_print: [ debug ]
1793 */

--- 7 unchanged lines hidden (view full) ---

1801 register int count;
1802
1803 if (object == NULL)
1804 return;
1805
1806 iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
1807 (int) object, (int) object->size,
1808 object->resident_page_count, object->ref_count);
1394 }
1395 }
1396 }
1397}
1398
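/*
 * Shape of the consistency walk above as a userland sketch: scan the
 * object list and complain about anonymous objects (no handle, default
 * or swap type) that have a zero reference count or are unreachable
 * from any map.  obj_in_map() is a stubbed stand-in for
 * vm_object_in_map(); the other names are inventions too.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum otype { O_DEFAULT, O_SWAP, O_VNODE };

struct aobj {
        struct aobj *next;
        void *handle;           /* NULL for anonymous memory */
        enum otype type;
        int ref_count;
        unsigned long size;
};

static bool
obj_in_map(struct aobj *o)
{
        return (o->ref_count > 0);      /* stub: pretend refs imply a map */
}

static void
audit_objects(struct aobj *head)
{
        struct aobj *o;

        for (o = head; o != NULL; o = o->next) {
                if (o->handle != NULL ||
                    (o->type != O_DEFAULT && o->type != O_SWAP))
                        continue;       /* only anonymous objects */
                if (o->ref_count == 0)
                        printf("anonymous obj has zero ref count, size %lu\n",
                            o->size);
                if (!obj_in_map(o))
                        printf("anonymous obj is not in a map, size %lu\n",
                            o->size);
        }
}

int
main(void)
{
        struct aobj leaked = { NULL, NULL, O_DEFAULT, 0, 4096 };

        audit_objects(&leaked);
        return (0);
}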
1399/*
1400 * vm_object_print: [ debug ]
1401 */

--- 7 unchanged lines hidden (view full) ---

1409 register int count;
1410
1411 if (object == NULL)
1412 return;
1413
1414 iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
1415 (int) object, (int) object->size,
1416 object->resident_page_count, object->ref_count);
1809 printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
1810 (int) object->pager, (int) object->paging_offset,
1811 (int) object->shadow, (int) object->shadow_offset);
1417 printf("pg_data=0x%x+0x%x, backing_object=(0x%x)+0x%x\n",
1418 (int) object->pg_data, (int) object->paging_offset,
1419 (int) object->backing_object, (int) object->backing_object_offset);
1812 printf("cache: next=%p, prev=%p\n",
1813 object->cached_list.tqe_next, object->cached_list.tqe_prev);
1814
1815 if (!full)
1816 return;
1817
1818 indent += 2;
1819 count = 0;

--- 19 unchanged lines hidden ---
1420 printf("cache: next=%p, prev=%p\n",
1421 object->cached_list.tqe_next, object->cached_list.tqe_prev);
1422
1423 if (!full)
1424 return;
1425
1426 indent += 2;
1427 count = 0;

--- 19 unchanged lines hidden ---