/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_ll.h>

/*
 *	first_free_is_valid_ll:
 *
 *	Sanity check: walk the entry list from the map header, skipping
 *	entries that abut their successor (no page-rounded hole between
 *	them), and verify that map->first_free points at the entry
 *	immediately before the first hole.  Logs and returns FALSE on
 *	a mismatch.
 */
boolean_t
first_free_is_valid_ll( vm_map_t map )
{
	vm_map_entry_t	entry, next;
	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_end) ||
	       (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_start) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map %p: %p should be %p\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
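
/*
 * Illustrative sketch, not part of the original source: how a debug
 * path might use the validator above.  The helper name is hypothetical,
 * and the locking requirement is an assumption carried over from the
 * UPDATE_FIRST_FREE_LL comment below.
 */
#if 0	/* example only, not compiled */
static void
example_check_first_free(vm_map_t map)
{
	/*
	 * Caller holds the map lock.  first_free_is_valid_ll() walks
	 * the entry list from the header and checks that
	 * map->first_free points at the entry just before the first
	 * page-aligned hole.
	 */
	assert(first_free_is_valid_ll(map));
}
#endif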

/*
 *	UPDATE_FIRST_FREE_LL:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.  When vm entry reuse is
 *	disabled for the map, the hint is left untouched.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free)			\
	MACRO_BEGIN							\
	if ((map)->disable_vmentry_reuse == FALSE) {			\
		vm_map_t	UFF_map;				\
		vm_map_entry_t	UFF_first_free;				\
		vm_map_entry_t	UFF_next_entry;				\
		UFF_map = (map);					\
		UFF_first_free = (new_first_free);			\
		UFF_next_entry = UFF_first_free->vme_next;		\
		while (vm_map_trunc_page(UFF_next_entry->vme_start) ==	\
		       vm_map_trunc_page(UFF_first_free->vme_end) ||	\
		       (vm_map_trunc_page(UFF_next_entry->vme_start) ==	\
			vm_map_trunc_page(UFF_first_free->vme_start) &&	\
			UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
			UFF_first_free = UFF_next_entry;		\
			UFF_next_entry = UFF_first_free->vme_next;	\
			if (UFF_first_free == vm_map_to_entry(UFF_map))	\
				break;					\
		}							\
		UFF_map->first_free = UFF_first_free;			\
		assert(first_free_is_valid(UFF_map));			\
	}								\
	MACRO_END
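
/*
 * Illustrative sketch, not part of the original source: a typical
 * caller refreshes the hint after deleting an entry, restarting the
 * hole scan from the predecessor of the removed entry.  The helper
 * name and the calling pattern are assumptions for illustration.
 */
#if 0	/* example only, not compiled */
static void
example_delete_and_update(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t prev = entry->vme_prev;

	vm_map_store_entry_unlink_ll(&map->hdr, entry);
	UPDATE_FIRST_FREE_LL(map, prev);	/* rescan from the predecessor */
}
#endif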

/*
 *	_vm_map_entry_link_ll:
 *
 *	Splice "entry" into the doubly-linked entry list
 *	immediately after "after_where" and bump the entry count.
 */
#define _vm_map_entry_link_ll(hdr, after_where, entry)			\
	MACRO_BEGIN							\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END

/*
 *	_vm_map_entry_unlink_ll:
 *
 *	Remove "entry" from the doubly-linked entry list
 *	and decrement the entry count.
 */
#define _vm_map_entry_unlink_ll(hdr, entry)				\
	MACRO_BEGIN							\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
	MACRO_END
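
/*
 * Illustrative sketch, not part of the original source: the two macros
 * above are exact inverses, splicing an entry into and out of the
 * circular doubly-linked list in O(1).  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_splice(struct vm_map_header *hdr, vm_map_entry_t after, vm_map_entry_t entry)
{
	_vm_map_entry_link_ll(hdr, after, entry);	/* after -> entry -> after's old next */
	assert(after->vme_next == entry && entry->vme_prev == after);
	_vm_map_entry_unlink_ll(hdr, entry);		/* list restored to its original shape */
}
#endif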

/*
 *	Macro:		_vm_map_copy_insert_ll
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	_vm_map_copy_insert_ll(map, where, copy)			\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	update_first_free_ll(VMCI_map, VMCI_map->first_free);		\
MACRO_END
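
/*
 * Illustrative sketch, not part of the original source: once the copy
 * chain has been spliced into the map, resetting the copy header keeps
 * the copy object from referencing entries it no longer owns.  The
 * helper name and the reset-after-insert pattern are assumptions.
 */
#if 0	/* example only, not compiled */
static void
example_adopt_copy(vm_map_t map, vm_map_entry_t where, vm_map_copy_t copy)
{
	_vm_map_copy_insert_ll(map, where, copy);	/* O(1) splice of the whole chain */
	vm_map_store_copy_reset_ll(copy, VM_MAP_ENTRY_NULL, 0);
}
#endif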

void
vm_map_store_init_ll( __unused struct vm_map_header *hdr)
{
	return;
}

/*
 *	vm_map_store_lookup_entry_ll:	[ internal use only ]
 *
 *	Use the linked list to find the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
	register vm_map_t		map,
	register vm_map_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	cur = map->hint;

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*.
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly.
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return.
				 */
				*entry = cur;
				SAVE_HINT_MAP_READ(map, cur);

				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT_MAP_READ(map, *entry);

	return(FALSE);
}
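
/*
 * Illustrative sketch, not part of the original source: the boolean
 * result separates "address is mapped" from "address falls in a hole";
 * in the hole case, *entry is the predecessor, which is the natural
 * insertion point for a new entry.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_lookup(vm_map_t map, vm_map_offset_t addr)
{
	vm_map_entry_t entry;

	if (vm_map_store_lookup_entry_ll(map, addr, &entry)) {
		/* addr lies within [entry->vme_start, entry->vme_end) */
	} else {
		/* unmapped: a new entry for addr would be linked after "entry" */
	}
}
#endif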

void
vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	_vm_map_entry_link_ll( mapHdr, after_where, entry);
}

void
vm_map_store_entry_unlink_ll( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	_vm_map_entry_unlink_ll( mapHdr, entry);
}

void
vm_map_store_copy_insert_ll( vm_map_t map, vm_map_entry_t after_where, vm_map_copy_t copy)
{
	_vm_map_copy_insert_ll( map, after_where, copy);
}

void
vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, __unused int nentries)
{
	copy->cpy_hdr.nentries = 0;
	vm_map_copy_first_entry(copy) =
		vm_map_copy_last_entry(copy) =
			vm_map_copy_to_entry(copy);
}

void
update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free)
{
	UPDATE_FIRST_FREE_LL( map, new_first_free);
}