/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_FREEZE

#ifndef CONFIG_MEMORYSTATUS
#error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif

#include <vm/default_freezer.h>

/*
 * Indicates that a page has been faulted back in.
 */
#define FREEZER_OFFSET_ABSENT ((vm_object_offset_t)(-1))

lck_grp_attr_t	default_freezer_handle_lck_grp_attr;
lck_grp_t	default_freezer_handle_lck_grp;

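/*
 * One-time setup: initialize the lock group used for the rw-locks
 * embedded in default_freezer_handle structures.
 */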
void
default_freezer_init(void)
{
	lck_grp_attr_setdefault(&default_freezer_handle_lck_grp_attr);
	lck_grp_init(&default_freezer_handle_lck_grp, "default_freezer_handle",
		     &default_freezer_handle_lck_grp_attr);
}


/*
 * Create the mapping table that will
 * tell us the object/offset pair that
 * corresponds to the page being sent
 * out or being brought back in.
 */

default_freezer_mapping_table_t
default_freezer_mapping_create(vm_object_t object, vm_offset_t offset)
{
	default_freezer_mapping_table_t table;

	table = kalloc(sizeof(struct default_freezer_mapping_table));
	if (table) {
		memset(table, 0, sizeof(*table));
	} else {
		panic("Could not allocate mapping table\n");
	}

	table->object = object;
	table->offset = offset;

	return table;
}

/*
 * Table modifications/lookup are done behind
 * the compact_object lock.
 */

void
default_freezer_mapping_free(default_freezer_mapping_table_t *table_p, boolean_t all)
{
	default_freezer_mapping_table_t freezer_table = *table_p;
	assert(freezer_table);

	if (all) {
		do {
			default_freezer_mapping_table_t next = freezer_table->next;
			kfree(freezer_table, sizeof(*freezer_table));
			freezer_table = next;
		} while (freezer_table);
	} else {
		kfree(freezer_table, sizeof(*freezer_table));
	}
}

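/*
 * Append a (memory_object, offset) pair to the tail of the mapping
 * table chain; the entry's position corresponds to the page's offset
 * in the compact object. A new table is allocated and linked once the
 * tail is full. Called with the compact object locked.
 */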
kern_return_t
default_freezer_mapping_store(
		default_freezer_mapping_table_t table,
		memory_object_offset_t table_offset,
		memory_object_t memory_object,
		memory_object_offset_t offset)
{
	default_freezer_mapping_table_entry_t entry;
	uint32_t index;

	assert(table);

	while (table->next) {
		table = table->next;
	}

	if (table->index >= MAX_FREEZE_TABLE_ENTRIES) {
		vm_object_t compact_object = table->object;
		default_freezer_mapping_table_t next;

		next = default_freezer_mapping_create(compact_object, table_offset);
		if (!next) {
			return KERN_FAILURE;
		}
		table->next = next;
		/* Continue in the newly linked table rather than overflowing this one. */
		table = next;
	}

	index = (table)->index++;
	entry = &(table)->entry[index];

	entry->memory_object = memory_object;
	entry->offset = offset;

	return KERN_SUCCESS;
}

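/*
 * Look up the entry for (memory_object, offset) by walking the table
 * chain. On success, optionally return the matching offset within the
 * compact object and/or mark the entry absent (i.e. the page has been
 * faulted back in). Called with the compact object locked.
 */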
kern_return_t
default_freezer_mapping_update(
		default_freezer_mapping_table_t table,
		memory_object_t memory_object,
		memory_object_offset_t offset,
		memory_object_offset_t *table_offset, /* OUT: the offset into the compact object */
		boolean_t remove_entry)
{

	kern_return_t kr = KERN_SUCCESS;
	vm_object_offset_t compact_offset;
	default_freezer_mapping_table_entry_t entry;
	uint32_t index = 0;

	if (table == NULL) {
		return KERN_FAILURE;
	}

	compact_offset = table->offset;

	while (1) {
		if (index >= table->index) {
			if (table->next) {
				table = table->next;
				index = 0;
			} else {
				/* End of tables and we didn't find our candidate entry */
				kr = KERN_FAILURE;
				break;
			}
		}

		entry = &table->entry[index];

		if (memory_object == entry->memory_object && offset == entry->offset) {
			if (remove_entry == TRUE) {
				/*
				 * Mark the page absent whilst retaining the object
				 * for cleanup during thaw.
				 */
				entry->offset = FREEZER_OFFSET_ABSENT;
			}
			if (table_offset != NULL) {
				*table_offset = compact_offset;
			}
			break;
		}

		index++;
		compact_offset += PAGE_SIZE;
	}
	return kr;
}

/*
 * Create a freezer memory object to act as the pager for the given
 * VM object. Each object being frozen gets one of these; its pages
 * are packed into the handle's compact object.
 */
void
default_freezer_memory_object_create(
			vm_object_t	object,
			default_freezer_handle_t df_handle)
{

	default_freezer_memory_object_t fo = NULL;

	fo = kalloc(sizeof(struct default_freezer_memory_object));

	if (fo) {
		memory_object_control_t control = NULL;

		memset(fo, 0, sizeof(*fo));

		control = memory_object_control_allocate(object);
		assert(control != MEMORY_OBJECT_CONTROL_NULL);

		df_memory_object_init((memory_object_t)fo, control, 0);
		fo->fo_df_handle = df_handle;

		default_freezer_handle_reference_locked(fo->fo_df_handle);

		object->pager = (memory_object_t)fo;
		object->pager_created = TRUE;
		object->pager_initialized = TRUE;
		object->pager_ready = TRUE;
		object->pager_trusted = TRUE;
		object->pager_control = control;
	} else {
		panic("Could not allocate freezer object\n");
	}
}

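/*
 * Pack src_object's resident pages into the handle's compact object,
 * subject to the dirty page budget. The real work happens in
 * vm_object_pack(), which calls back into default_freezer_pack_page()
 * (see below); this wrapper takes and drops the handle lock around it
 * and lets the counters report what was found.
 */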
kern_return_t
default_freezer_pack(
	unsigned int	*purgeable_count,
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	boolean_t	*shared,
	vm_object_t	src_object,
	default_freezer_handle_t df_handle)
{
	kern_return_t			kr = KERN_SUCCESS;

	if (df_handle) {
		default_freezer_handle_lock(df_handle);
	}

	kr = vm_object_pack(purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared, src_object, df_handle);

	if (df_handle) {
		default_freezer_handle_unlock(df_handle);
	}

	return kr;
}

/*
 * Called with freezer_handle locked.
 * default_freezer_pack locks the handle, calls
 * vm_object_pack which, in turn, will call
 * default_freezer_pack_page().
 */
void
default_freezer_pack_page(
		vm_page_t p,
		default_freezer_handle_t df_handle)
{

	default_freezer_mapping_table_t freeze_table = NULL;
	memory_object_t			memory_object = NULL;
	vm_object_t			compact_object = VM_OBJECT_NULL;

	assert(df_handle);

	compact_object = df_handle->dfh_compact_object;

	assert(compact_object);

	freeze_table = df_handle->dfh_table;
	memory_object = p->object->pager;

	if (memory_object == NULL) {
		default_freezer_memory_object_create(p->object, df_handle);
		memory_object = p->object->pager;
	} else {
		assert(df_handle == ((default_freezer_memory_object_t)memory_object)->fo_df_handle);
	}

	vm_object_lock(compact_object);
	default_freezer_mapping_store(freeze_table, df_handle->dfh_compact_offset, memory_object, p->offset + p->object->paging_offset);
	vm_page_rename(p, compact_object, df_handle->dfh_compact_offset, FALSE);
	vm_object_unlock(compact_object);

	df_handle->dfh_compact_offset += PAGE_SIZE;
}

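/*
 * Thaw: fault all of the compact object's pages back in, then walk the
 * mapping tables and move each page back to the object/offset it came
 * from, freeing the tables as we go. On return the handle no longer
 * owns a compact object.
 */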
kern_return_t
default_freezer_unpack(
		default_freezer_handle_t df_handle)
{

	vm_page_t				compact_page = VM_PAGE_NULL, src_page = VM_PAGE_NULL;
	uint32_t				index = 0;
	vm_object_t				src_object = VM_OBJECT_NULL;
	vm_object_t				compact_object = VM_OBJECT_NULL;
	memory_object_t				src_mem_object = MEMORY_OBJECT_NULL;
	memory_object_offset_t			src_offset = 0;
	vm_object_offset_t			compact_offset = 0;
	default_freezer_memory_object_t		fo = NULL;
	default_freezer_mapping_table_t		freeze_table = NULL;
	boolean_t				should_unlock_handle = FALSE;
	kern_return_t				kr;

	assert(df_handle);

	default_freezer_handle_lock(df_handle);
	should_unlock_handle = TRUE;

	freeze_table = df_handle->dfh_table;
	compact_object = df_handle->dfh_compact_object;

	assert(compact_object);
	assert(compact_object->alive);
	assert(!compact_object->terminating);
	assert(compact_object->pager_ready);

	/* Bring the pages back in */
	if ((kr = vm_object_pagein(compact_object)) != KERN_SUCCESS) {
		if (should_unlock_handle) {
			default_freezer_handle_unlock(df_handle);
		}
		return (kr);
	}

	vm_object_lock(compact_object);

	for (index = 0, compact_offset = 0; ; index++, compact_offset += PAGE_SIZE) {
		if (index >= freeze_table->index) {
			default_freezer_mapping_table_t table_next;

			table_next = freeze_table->next;

			/* Free the tables as we go along */
			default_freezer_mapping_free(&freeze_table, FALSE);

			if (table_next == NULL) {
				break;
			}

			freeze_table = table_next;
			index = 0;
		}

		/*
		 * Skip slots that represent deallocated memory objects.
		 */
		src_mem_object = freeze_table->entry[index].memory_object;
		if (src_mem_object == MEMORY_OBJECT_NULL)
			continue;

		/*
		 * Skip slots that represent faulted pages.
		 */
		src_offset = freeze_table->entry[index].offset;
		if (src_offset != FREEZER_OFFSET_ABSENT) {

			compact_page = vm_page_lookup(compact_object, compact_offset);
			assert(compact_page);

			fo = (default_freezer_memory_object_t)src_mem_object;

			src_object = memory_object_control_to_vm_object(fo->fo_pager_control);

			/* Move back over from the freeze object to the original */
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object, src_offset - src_object->paging_offset);
			if (src_page != VM_PAGE_NULL) {
				/*
				 * We might be racing with a VM fault.
				 * So handle that gracefully.
				 */
				assert(src_page->absent == TRUE);
				VM_PAGE_FREE(src_page);
			}
			vm_page_rename(compact_page, src_object, src_offset - src_object->paging_offset, FALSE);
			vm_object_unlock(src_object);
		}

	}

	vm_object_unlock(compact_object);

	vm_object_deallocate(compact_object);

	if (should_unlock_handle) {
		df_handle->dfh_table = NULL;
		df_handle->dfh_compact_object = VM_OBJECT_NULL;
		df_handle->dfh_compact_offset = 0;
		default_freezer_handle_unlock(df_handle);
	}
	return (KERN_SUCCESS);
}

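/*
 * memory_object (pager) entry points for the freezer memory objects
 * created above. Only deallocate, init, terminate and data_request do
 * real work; reference is a no-op, and the remaining entry points are
 * never expected to be called and panic if they are.
 */
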
void
df_memory_object_reference(__unused memory_object_t mem_obj)
{

	/* No-op */
}

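/*
 * The last reference to a freezer memory object is gone (its VM object
 * is being reclaimed). Scrub any entries that still point at it out of
 * the handle's mapping tables, drop our reference on the handle and
 * free the pager structure itself.
 */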
void
df_memory_object_deallocate(memory_object_t mem_obj)
{

	default_freezer_memory_object_t	fo = (default_freezer_memory_object_t)mem_obj;

	assert(fo);

	if (fo->fo_df_handle != NULL) {

		default_freezer_mapping_table_t table = NULL;
		default_freezer_mapping_table_entry_t entry;
		boolean_t found = FALSE;
		uint32_t index = 0;
		vm_object_t compact_object = VM_OBJECT_NULL;

		default_freezer_handle_lock(fo->fo_df_handle);

		compact_object = fo->fo_df_handle->dfh_compact_object;
		table = fo->fo_df_handle->dfh_table;

		if (compact_object == VM_OBJECT_NULL || table == NULL) {
			/* Nothing to do. A thaw must have cleared it all out. */
		} else {
			vm_object_lock(compact_object);

			/* Remove from table */
			while (1) {
				if (index >= table->index) {
					if (table->next) {
						table = table->next;
						index = 0;
					} else {
						/* End of tables */
						break;
					}
				}

				entry = &table->entry[index];
				if (mem_obj == entry->memory_object) {
					/* It matches, so clear the entry */
					if (!found) {
						found = TRUE;
					}
					entry->memory_object = MEMORY_OBJECT_NULL;
					entry->offset = 0;
				} else if (MEMORY_OBJECT_NULL != entry->memory_object) {
					/* We have a different valid object; we're done */
					if (found) {
						break;
					}
				}

				index++;
			}

			vm_object_unlock(compact_object);
		}

		if (default_freezer_handle_deallocate_locked(fo->fo_df_handle)) {
			default_freezer_handle_unlock(fo->fo_df_handle);
		}
	}

	kfree(fo, sizeof(*fo));
}

kern_return_t
df_memory_object_init(
		memory_object_t mem_obj,
		memory_object_control_t control,
		__unused memory_object_cluster_size_t pager_page_size)
{

	default_freezer_memory_object_t	fo = (default_freezer_memory_object_t)mem_obj;
	assert(fo);

	fo->fo_pager_ops = &default_freezer_ops;
	fo->fo_pager_header.io_bits = IKOT_MEMORY_OBJECT;
	fo->fo_pager_control = control;

	return KERN_SUCCESS;
}

kern_return_t
df_memory_object_terminate(memory_object_t mem_obj)
{

	default_freezer_memory_object_t	fo = (default_freezer_memory_object_t)mem_obj;
	assert(fo);
	memory_object_control_deallocate(fo->fo_pager_control);
	return KERN_SUCCESS;
}


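/*
 * Page-in path for a frozen page. Translate (mem_obj, offset) into an
 * offset in the compact object via the mapping table, pull the compact
 * page back from the default pager if necessary, and hand the page
 * back to the original object. If the mapping lookup fails, the page
 * is reported as unavailable via an aborted UPL.
 */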
kern_return_t
df_memory_object_data_request(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t protection_required,
		memory_object_fault_info_t fault_info)
{

	vm_object_t	src_object = VM_OBJECT_NULL, compact_object = VM_OBJECT_NULL;
	memory_object_offset_t	compact_offset = 0;
	memory_object_t pager = NULL;
	kern_return_t kr = KERN_SUCCESS;
	boolean_t	drop_object_ref = FALSE;
	vm_page_t compact_page, dst_page;

	default_freezer_memory_object_t fo = (default_freezer_memory_object_t)mem_obj;
	default_freezer_handle_t	df_handle = NULL;

	df_handle = fo->fo_df_handle;

	if (df_handle == NULL) {
		kr = KERN_FAILURE;
	} else {
		default_freezer_handle_lock(df_handle);

		src_object = memory_object_control_to_vm_object(fo->fo_pager_control);
		compact_object = fo->fo_df_handle->dfh_compact_object;

		if (compact_object == NULL) {
			kr = KERN_FAILURE;
		} else {
			vm_object_lock(compact_object);
			vm_object_reference_locked(compact_object);
			drop_object_ref = TRUE;

			kr = default_freezer_mapping_update(fo->fo_df_handle->dfh_table,
								mem_obj,
								offset,
								&compact_offset,
								FALSE);
			vm_object_unlock(compact_object);
		}
		default_freezer_handle_unlock(df_handle);
	}

	if (length == 0) {
		/* Caller is just querying to see if we have the page. */
		if (drop_object_ref) {
			vm_object_deallocate(compact_object);
		}
		return kr;
	}

	if (kr != KERN_SUCCESS) {

		unsigned int request_flags;
		upl_t        upl;
		unsigned int page_list_count = 0;

		request_flags = UPL_NO_SYNC | UPL_RET_ONLY_ABSENT | UPL_SET_LITE | UPL_SET_INTERNAL;
		/*
		 * Should we decide to activate USE_PRECIOUS (from default_pager_internal.h)
		 * here, then the request_flags will need to add these to the ones above:
		 *
		 * request_flags |= UPL_PRECIOUS | UPL_CLEAN_IN_PLACE
		 */
		request_flags |= UPL_REQUEST_SET_DIRTY;

		memory_object_super_upl_request(fo->fo_pager_control,
						(memory_object_offset_t)offset,
						PAGE_SIZE, PAGE_SIZE,
						&upl, NULL, &page_list_count,
						request_flags);
		upl_range_needed(upl, 0, 1);

		upl_abort(upl, UPL_ABORT_UNAVAILABLE);
		upl_deallocate(upl);

		if (drop_object_ref) {
			vm_object_deallocate(compact_object);
		}

		return KERN_SUCCESS;
	}
	vm_object_lock(compact_object);

	assert(compact_object->alive);
	assert(!compact_object->terminating);

	/*
	 * Note that activity_in_progress can be non-zero while the pager has
	 * not yet been created: the count is bumped in vm_pageout_cluster,
	 * but the pager is only created once the pageout thread runs and
	 * starts processing the pages placed on the I/O queue. Once the
	 * compact object has had its first page placed on the I/O queue, we
	 * must wait here until the entire freeze operation has completed.
	 */
	vm_object_paging_wait(compact_object, THREAD_UNINT);

	if (compact_object->pager_ready) {
		vm_object_paging_begin(compact_object);

		compact_object->blocked_access = TRUE;
		pager = (memory_object_t)compact_object->pager;

		vm_object_unlock(compact_object);

		((vm_object_fault_info_t) fault_info)->io_sync = TRUE;

		/*
		 * We have a reference on both the default_freezer
		 * memory object handle and the compact object.
		 */
		kr = dp_memory_object_data_request(pager,
						   compact_offset,
						   length,
						   protection_required,
						   fault_info);
		if (kr != KERN_SUCCESS)
			panic("%d: default_freezer TOC pointed us to default_pager incorrectly\n", kr);

		vm_object_lock(compact_object);

		compact_object->blocked_access = FALSE;
		vm_object_paging_end(compact_object);
	}
	vm_object_lock(src_object);

	if ((compact_page = vm_page_lookup(compact_object, compact_offset)) != VM_PAGE_NULL) {

		dst_page = vm_page_lookup(src_object, offset - src_object->paging_offset);

		if (dst_page && !dst_page->absent) {
			/*
			 * Someone raced us here and unpacked
			 * the object behind us.
			 * So cleanup before we return.
			 */
			VM_PAGE_FREE(compact_page);
		} else {
			if (dst_page != NULL) {
				VM_PAGE_FREE(dst_page);
			}
			vm_page_rename(compact_page, src_object, offset - src_object->paging_offset, FALSE);

			if (default_freezer_mapping_update(fo->fo_df_handle->dfh_table,
							   mem_obj,
							   offset,
							   NULL,
							   TRUE) != KERN_SUCCESS) {
				printf("Page for object: 0x%lx at offset: 0x%lx not found in table\n", (uintptr_t)src_object, (uintptr_t)offset);
			}

			PAGE_WAKEUP_DONE(compact_page);
		}
	} else {
		printf("%d: default_freezer: compact_object doesn't have the page for object 0x%lx at offset 0x%lx\n", kr, (uintptr_t)compact_object, (uintptr_t)compact_offset);
		kr = KERN_SUCCESS;
	}
	vm_object_unlock(src_object);
	vm_object_unlock(compact_object);
	vm_object_deallocate(compact_object);

	return kr;
}

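/*
 * A freezer memory object only ever services data_request; the entry
 * points below should never be reached and panic to make any misuse
 * obvious.
 */
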
kern_return_t
df_memory_object_data_return(
		__unused memory_object_t		mem_obj,
		__unused memory_object_offset_t	offset,
		__unused memory_object_cluster_size_t			size,
		__unused memory_object_offset_t	*resid_offset,
		__unused int		*io_error,
		__unused boolean_t	dirty,
		__unused boolean_t	kernel_copy,
		__unused int	upl_flags)
{

	panic("default_freezer: df_memory_object_data_return should not be called\n");
	return KERN_SUCCESS;
}

kern_return_t
df_memory_object_data_initialize(
		__unused memory_object_t mem_obj,
		__unused memory_object_offset_t offset,
		__unused memory_object_cluster_size_t size)
{

	panic("default_freezer: df_memory_object_data_initialize should not be called\n");
	return KERN_SUCCESS;
}

kern_return_t
df_memory_object_data_unlock(
		__unused memory_object_t mem_obj,
		__unused memory_object_offset_t offset,
		__unused memory_object_size_t length,
		__unused vm_prot_t prot)
{

	panic("default_freezer: df_memory_object_data_unlock should not be called\n");
	return KERN_FAILURE;
}

kern_return_t
df_memory_object_synchronize(
		__unused memory_object_t mem_obj,
		__unused memory_object_offset_t offset,
		__unused memory_object_size_t length,
		__unused vm_sync_t flags)
{

	panic("default_freezer: df_memory_object_synchronize should not be called\n");
	return KERN_FAILURE;
}

kern_return_t
df_memory_object_map(
		__unused memory_object_t mem_obj,
		__unused vm_prot_t prot)
{

	panic("default_freezer: df_memory_object_map should not be called\n");
	return KERN_FAILURE;
}

kern_return_t
df_memory_object_last_unmap(__unused memory_object_t mem_obj)
{

	panic("default_freezer: df_memory_object_last_unmap should not be called\n");
	return KERN_FAILURE;
}


kern_return_t
df_memory_object_data_reclaim(
		__unused memory_object_t mem_obj,
		__unused boolean_t reclaim_backing_store)
{

	panic("default_freezer: df_memory_object_data_reclaim should not be called\n");
	return KERN_SUCCESS;
}


/*
 * The freezer handle is used to make sure that
 * we don't race against the lookup and termination
 * of the compact object.
 */

void
default_freezer_handle_lock(default_freezer_handle_t df_handle)
{
	lck_rw_lock_exclusive(&df_handle->dfh_lck);
}

void
default_freezer_handle_unlock(default_freezer_handle_t df_handle)
{
	lck_rw_done(&df_handle->dfh_lck);
}

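/*
 * Allocate a freezer handle with a single reference; the caller must
 * still call default_freezer_handle_init() before using it. A rough
 * sketch of the intended lifecycle (illustrative only, not a verbatim
 * copy of the freeze path):
 *
 *	dfh = default_freezer_handle_allocate();
 *	default_freezer_handle_init(dfh);
 *	default_freezer_pack(..., object, dfh);	(one call per object)
 *	default_freezer_pageout(dfh);		(push the compact object out)
 *	...
 *	default_freezer_unpack(dfh);		(on thaw)
 *	default_freezer_handle_deallocate(dfh);
 */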
default_freezer_handle_t
default_freezer_handle_allocate(void)
{

	default_freezer_handle_t		df_handle = NULL;
	df_handle = kalloc(sizeof(struct default_freezer_handle));

	if (df_handle) {
		memset(df_handle, 0, sizeof(struct default_freezer_handle));
		lck_rw_init(&df_handle->dfh_lck, &default_freezer_handle_lck_grp, NULL);
		/* No one knows of this handle yet so no need to lock it. */
		default_freezer_handle_reference_locked(df_handle);
	} else {
		panic("Failed to allocate default_freezer_handle structure\n");
	}
	return df_handle;
}

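/*
 * Second-stage initialization: create the compact object that will
 * hold the frozen pages and the first mapping table that describes
 * them. Fails if the handle has already been initialized.
 */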
kern_return_t
default_freezer_handle_init(
	default_freezer_handle_t df_handle)
{
	kern_return_t				kr = KERN_SUCCESS;
	vm_object_t				compact_object = VM_OBJECT_NULL;

	if (df_handle == NULL || df_handle->dfh_table != NULL) {
		kr = KERN_FAILURE;
	} else {
		/* Create our compact object */
		compact_object = vm_object_allocate((vm_map_offset_t)(VM_MAX_ADDRESS) - (vm_map_offset_t)(VM_MIN_ADDRESS));
		if (!compact_object) {
			kr = KERN_FAILURE;
		} else {
			df_handle->dfh_compact_object = compact_object;
			df_handle->dfh_compact_offset = 0;
			df_handle->dfh_table = default_freezer_mapping_create(df_handle->dfh_compact_object, df_handle->dfh_compact_offset);
			if (!df_handle->dfh_table) {
				kr = KERN_FAILURE;
			}
		}
	}

	return kr;
}

void
default_freezer_handle_reference_locked(
	default_freezer_handle_t df_handle)
{
	assert(df_handle);
	df_handle->dfh_ref_count++;
}

void
default_freezer_handle_deallocate(
	default_freezer_handle_t df_handle)
{
	assert(df_handle);
	default_freezer_handle_lock(df_handle);
	if (default_freezer_handle_deallocate_locked(df_handle)) {
		default_freezer_handle_unlock(df_handle);
	}
}

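/*
 * Drop a reference with the handle lock held. Returns TRUE if the
 * handle survives and the caller must still unlock it, FALSE if this
 * was the last reference and the handle has been destroyed.
 */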
boolean_t
default_freezer_handle_deallocate_locked(
	default_freezer_handle_t df_handle)
{
	boolean_t	should_unlock = TRUE;

	assert(df_handle);
	df_handle->dfh_ref_count--;
	if (df_handle->dfh_ref_count == 0) {
		lck_rw_destroy(&df_handle->dfh_lck, &default_freezer_handle_lck_grp);
		kfree(df_handle, sizeof(struct default_freezer_handle));
		should_unlock = FALSE;
	}
	return should_unlock;
}

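/*
 * Queue the compact object's pages for pageout so the frozen data can
 * eventually be pushed out to the default pager.
 */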
void
default_freezer_pageout(
	default_freezer_handle_t df_handle)
{
	assert(df_handle);

	vm_object_pageout(df_handle->dfh_compact_object);
}

#endif /* CONFIG_FREEZE */
