/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_ll.h>

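/*
 *	first_free_is_valid_ll:
 *
 *	Debug check: walk the entry list from the head, skipping
 *	entries with no hole before them (next entry's start page
 *	abuts or shares the previous entry's pages), and verify that
 *	map->first_free points at the entry immediately preceding the
 *	first hole.  Returns TRUE if the hint is consistent, FALSE
 *	(after logging the discrepancy) otherwise.
 */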
boolean_t
first_free_is_valid_ll( vm_map_t map )
{
	vm_map_entry_t	entry, next;
	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	while (vm_map_trunc_page(next->vme_start,
				 VM_MAP_PAGE_MASK(map)) ==
	       vm_map_trunc_page(entry->vme_end,
				 VM_MAP_PAGE_MASK(map)) ||
	       (vm_map_trunc_page(next->vme_start,
				  VM_MAP_PAGE_MASK(map)) ==
		vm_map_trunc_page(entry->vme_start,
				  VM_MAP_PAGE_MASK(map)) &&
		next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map))
			break;
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map %p: %p should be %p\n",
		       map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}

/*
 *	UPDATE_FIRST_FREE_LL:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free)			\
	MACRO_BEGIN							\
	if (map->disable_vmentry_reuse == FALSE) {			\
		vm_map_t	UFF_map;				\
		vm_map_entry_t	UFF_first_free;				\
		vm_map_entry_t	UFF_next_entry;				\
		UFF_map = (map);					\
		UFF_first_free = (new_first_free);			\
		UFF_next_entry = UFF_first_free->vme_next;		\
		while (vm_map_trunc_page(UFF_next_entry->vme_start,	\
					 VM_MAP_PAGE_MASK(UFF_map)) ==	\
		       vm_map_trunc_page(UFF_first_free->vme_end,	\
					 VM_MAP_PAGE_MASK(UFF_map)) ||	\
		       (vm_map_trunc_page(UFF_next_entry->vme_start,	\
					  VM_MAP_PAGE_MASK(UFF_map)) ==	\
			vm_map_trunc_page(UFF_first_free->vme_start,	\
					  VM_MAP_PAGE_MASK(UFF_map)) &&	\
			UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
			UFF_first_free = UFF_next_entry;		\
			UFF_next_entry = UFF_first_free->vme_next;	\
			if (UFF_first_free == vm_map_to_entry(UFF_map))	\
				break;					\
		}							\
		UFF_map->first_free = UFF_first_free;			\
		assert(first_free_is_valid(UFF_map));			\
	}								\
	MACRO_END
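
/*
 *	Worked example (hypothetical addresses, not from the original
 *	source): with map-aligned entries laid out as
 *
 *		[A: 0x1000-0x2000][B: 0x2000-0x3000]   hole   [C: 0x5000-0x6000]
 *
 *	a scan starting at A advances to B, because B's start page
 *	equals A's end page (no hole between them), and then stops:
 *	C does not begin where B ends.  map->first_free is left
 *	pointing at B, the entry immediately before the first hole.
 */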

#define _vm_map_entry_link_ll(hdr, after_where, entry)			\
	MACRO_BEGIN							\
	if (entry->map_aligned) {					\
		assert(VM_MAP_PAGE_ALIGNED((entry->vme_start),		\
					   VM_MAP_HDR_PAGE_MASK((hdr))));\
		assert(VM_MAP_PAGE_ALIGNED((entry->vme_end),		\
					   VM_MAP_HDR_PAGE_MASK((hdr))));\
	}								\
	(hdr)->nentries++;						\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END

#define _vm_map_entry_unlink_ll(hdr, entry)				\
	MACRO_BEGIN							\
	(hdr)->nentries--;						\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
	MACRO_END
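
/*
 *	Both macros above are O(1) splices on the doubly-linked,
 *	circular entry list whose sentinel is the map header.  A
 *	minimal usage sketch (hypothetical entries; "prev" must
 *	already be linked into the list):
 *
 *		_vm_map_entry_link_ll(&map->hdr, prev, new_entry);
 *		...
 *		_vm_map_entry_unlink_ll(&map->hdr, new_entry);
 *
 *	Neither macro touches map->first_free, so callers that care
 *	about the free-space hint refresh it afterwards, e.g. with
 *	update_first_free_ll().
 */
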
/*
 *	Macro:		_vm_map_copy_insert_ll
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	Side effects:
 *		The copy chain is destroyed.
 *	Warning:
 *		The arguments are evaluated multiple times.
 */
#define	_vm_map_copy_insert_ll(map, where, copy)				\
MACRO_BEGIN								\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
		->vme_next = (VMCI_where->vme_next);			\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
		->vme_prev = VMCI_where;				\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	update_first_free_ll(VMCI_map, VMCI_map->first_free);		\
MACRO_END
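
/*
 *	Pictorially (a sketch, with F and L standing for the first and
 *	last entries of the copy chain), the splice above rewires four
 *	links:
 *
 *		before:	where <-> next
 *		after:	where <-> F <-> ... <-> L <-> next
 *
 *	and then folds the copied entry count into the map's header.
 *	The copy object's links now point into the map, which is why
 *	the copy chain is considered destroyed.
 */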


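/*
 *	vm_map_store_init_ll:
 *
 *	Nothing to do for the linked-list store: the header's circular
 *	entry list is initialized by the store-independent map setup
 *	code, so this is deliberately a no-op.
 */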
void
vm_map_store_init_ll( __unused struct vm_map_header *hdr)
{
	return;
}

/*
 *	vm_map_store_lookup_entry_ll:	[ internal use only ]
 *
 *	Use the linked list to find the map entry containing (or
 *	immediately preceding) the specified address in the given map;
 *	the entry is returned in the "entry" parameter.  The boolean
 *	result indicates whether the address is actually contained in
 *	the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
	register vm_map_t		map,
	register vm_map_offset_t	address,
	vm_map_entry_t		*entry)		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	cur = map->hint;

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */

	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */

				*entry = cur;
				SAVE_HINT_MAP_READ(map, cur);

				return(TRUE);
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT_MAP_READ(map, *entry);

	return(FALSE);
}
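
/*
 *	Worked example (hypothetical layout): with entries
 *	[0x1000, 0x2000) and [0x4000, 0x5000) in the map,
 *
 *		lookup of 0x1800 returns TRUE,  *entry = the 0x1000 entry;
 *		lookup of 0x3000 returns FALSE, *entry = the 0x1000 entry
 *			(the entry preceding the hole);
 *		lookup of 0x0800 returns FALSE, *entry = vm_map_to_entry(map)
 *			(the address precedes every entry).
 */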
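/*
 *	The functions below are thin wrappers that expose the
 *	linked-list macros to the store-independent layer
 *	(vm_map_store.c), which dispatches to the list or red-black
 *	tree implementation as appropriate.
 */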
void
vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	_vm_map_entry_link_ll( mapHdr, after_where, entry);
}

void
vm_map_store_entry_unlink_ll( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	_vm_map_entry_unlink_ll( mapHdr, entry);
}

void
vm_map_store_copy_insert_ll( vm_map_t map, vm_map_entry_t after_where, vm_map_copy_t copy)
{
	_vm_map_copy_insert_ll( map, after_where, copy);
}
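/*
 *	vm_map_store_copy_reset_ll:
 *
 *	Reset the copy object's header to an empty, self-referencing
 *	list, typically after its entries have been spliced into a
 *	map, so the copy no longer references any entries.
 */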
void
vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, __unused int nentries)
{
	copy->cpy_hdr.nentries = 0;
	vm_map_copy_first_entry(copy) =
		vm_map_copy_last_entry(copy) =
			vm_map_copy_to_entry(copy);
}

void
update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free)
{
	UPDATE_FIRST_FREE_LL( map, new_first_free);
}