vm_radix.c: r226873 (deleted) vs. r226876 (added)
Unchanged lines are shown once; where the revisions differ, the r226873 lines come first and the r226876 lines follow, each keeping its own source line number.
1/*
2 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
3 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28
29
30/*
31 * Radix tree implementation.
32 */
33
34#include <sys/cdefs.h>
35
36#include <sys/param.h>
37#include <sys/conf.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/malloc.h>
41#include <sys/queue.h>
42#include <sys/param.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45#include <sys/ktr.h>
46#include <vm/uma.h>
47#include <vm/vm.h>
48#include <vm/vm_param.h>
48#include <vm/vm_extern.h>
49#include <vm/vm_kern.h>
50#include <vm/vm_page.h>
51#include <vm/vm_radix.h>
52#include <vm/vm_object.h>
53
54#include <sys/kdb.h>
55
56CTASSERT(sizeof(struct vm_radix_node) < PAGE_SIZE);
57
58static uma_zone_t vm_radix_node_zone;
59
60#if 0
61#ifndef UMA_MD_SMALL_ALLOC
61static void *
62vm_radix_node_zone_allocf(uma_zone_t zone, int size, uint8_t *flags, int wait)
63{
64 vm_offset_t addr;
65 vm_page_t m;
66 int pflags;
67
68 /* Inform UMA that this allocator uses kernel_map. */
69 *flags = UMA_SLAB_KERNEL;
70
71 pflags = VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
72
73 /*
 74	 * Since kmem_alloc_nofault() can fail, just assume that M_NOWAIT
 75	 * is set and act accordingly.
76 */
77 pflags |= ((wait & M_USE_RESERVE) != 0) ? VM_ALLOC_INTERRUPT :
78 VM_ALLOC_SYSTEM;
79 if ((wait & M_ZERO) != 0)
80 pflags |= VM_ALLOC_ZERO;
81 addr = kmem_alloc_nofault(kernel_map, size);
82 if (addr == 0)
83 return (NULL);
84
85 /* Just one page allocation is assumed here. */
86 m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
87 pflags);
88 if (m == NULL) {
89 kmem_free(kernel_map, addr, size);
90 return (NULL);
91 }
92 if ((wait & M_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
93 pmap_zero_page(m);
94 pmap_qenter(addr, &m, 1);
95 return ((void *)addr);
96}
97
98static void
99vm_radix_node_zone_freef(void *item, int size, uint8_t flags)
100{
101 vm_page_t m;
102 vm_offset_t voitem;
103
104 MPASS((flags & UMA_SLAB_KERNEL) != 0);
105
106 /* Just one page allocation is assumed here. */
107 voitem = (vm_offset_t)item;
108 m = PHYS_TO_VM_PAGE(pmap_kextract(voitem));
109 pmap_qremove(voitem, 1);
110 vm_page_free(m);
111 kmem_free(kernel_map, voitem, size);
112}
113
114static void
115init_vm_radix_alloc(void *dummy __unused)
116{
117
118 uma_zone_set_allocf(vm_radix_node_zone, vm_radix_node_zone_allocf);
119 uma_zone_set_freef(vm_radix_node_zone, vm_radix_node_zone_freef);
120}
121SYSINIT(vm_radix, SI_SUB_KMEM, SI_ORDER_SECOND, init_vm_radix_alloc, NULL);
122#endif
123
124/*
125 * Radix node zone destructor.
126 */
127#ifdef INVARIANTS
128static void
129vm_radix_node_zone_dtor(void *mem, int size, void *arg)
130{
131 struct vm_radix_node *rnode;
132
133 rnode = mem;
134 KASSERT(rnode->rn_count == 0,
135 ("vm_radix_node_put: Freeing a node with %d children\n",
136 rnode->rn_count));
137}
138#endif
139
140/*
141 * Allocate a radix node. Initializes all elements to 0.
142 */
143static struct vm_radix_node *
144static __inline struct vm_radix_node *
144vm_radix_node_get(void)
145{
146
147 return (uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO));
148}
149
150/*
151 * Free radix node.
152 */
153static void
154static __inline void
154vm_radix_node_put(struct vm_radix_node *rnode)
155{
156
157 uma_zfree(vm_radix_node_zone, rnode);
158}
159
160/*
161 * Return the position in the array for a given level.
162 */
163static inline int
164static __inline int
164vm_radix_slot(vm_pindex_t index, int level)
165{
166
167 return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
168}
169
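vm_radix_slot() picks the child to follow at each level by shifting the page index down by level * VM_RADIX_WIDTH bits and masking. A minimal user-space sketch of the same arithmetic; the EX_* constants are illustrative stand-ins for the VM_RADIX_WIDTH/VM_RADIX_MASK definitions in vm_radix.h, which are not part of this diff:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the real constants live in vm_radix.h. */
#define EX_RADIX_WIDTH  4
#define EX_RADIX_MASK   ((1 << EX_RADIX_WIDTH) - 1)

/* Same shift-and-mask as vm_radix_slot(). */
static int
ex_radix_slot(uint64_t index, int level)
{
        return ((index >> (level * EX_RADIX_WIDTH)) & EX_RADIX_MASK);
}

int
main(void)
{
        uint64_t index = 0x2a5f;
        int level;

        /* Walk from the root level down, printing the slot taken at each step. */
        for (level = 3; level >= 0; level--)
                printf("level %d -> slot %#x\n", level,
                    ex_radix_slot(index, level));
        return (0);
}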
170void
171vm_radix_init(void)
172{
173
174 vm_radix_node_zone = uma_zcreate("RADIX NODE",
175 sizeof(struct vm_radix_node), NULL,
176#ifdef INVARIANTS
177 vm_radix_node_zone_dtor,
178#else
179 NULL,
180#endif
181 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
182 NULL, NULL, VM_RADIX_HEIGHT, UMA_ZONE_VM);
182}
183
184/*
186 * Extract the root node and height from a radix tree with a single load.
187 */
188static __inline int
189vm_radix_height(struct vm_radix *rtree, struct vm_radix_node **rnode)
190{
191 uintptr_t root;
192 int height;
193
194 root = rtree->rt_root;
195 height = root & VM_RADIX_HEIGHT;
196 *rnode = (struct vm_radix_node *)(root - height);
197 return (height);
198}
199
200
201/*
202 * Set the root node and height for a radix tree.
203 */
204static inline void
205vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode,
206 int height)
207{
208 uintptr_t root;
209
210 root = (uintptr_t)rnode | height;
211 rtree->rt_root = root;
212}
213
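vm_radix_height() and vm_radix_setroot() pack the tree height into the low bits of the root pointer so both can be read with a single load, which is also why the zone alignment above changes from UMA_ALIGN_PTR to VM_RADIX_HEIGHT: node addresses must keep those bits clear. A rough user-space model of the packing, assuming VM_RADIX_HEIGHT is a small low-bit mask (its real definition is in vm_radix.h, not shown here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EX_HEIGHT_MASK  0xf     /* illustrative stand-in for VM_RADIX_HEIGHT */

int
main(void)
{
        uintptr_t packed;
        void *node;
        int height = 3;

        /* Node addresses must leave the mask bits clear. */
        node = aligned_alloc(EX_HEIGHT_MASK + 1, 64);
        assert(((uintptr_t)node & EX_HEIGHT_MASK) == 0);

        /* vm_radix_setroot(): fold the height into the pointer. */
        packed = (uintptr_t)node | height;

        /* vm_radix_height(): mask the height out, recover the node pointer. */
        printf("height %d, node %p\n", (int)(packed & EX_HEIGHT_MASK),
            (void *)(packed - (packed & EX_HEIGHT_MASK)));
        free(node);
        return (0);
}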
214/*
 185 * Inserts the key-value pair into the radix tree. Returns errno.
186 * Panics if the key already exists.
187 */
188int
189vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, void *val)
190{
191 struct vm_radix_node *rnode;
192 int slot;
222 struct vm_radix_node *root;
193 int level;
223 int level;
224 int slot;
194
195 CTR3(KTR_VM,
196 "insert: tree %p, index %p, val %p", rtree, (void *)index, val);
197 if (index == -1)
198 panic("vm_radix_insert: -1 is not a valid index.\n");
230 level = vm_radix_height(rtree, &root);
199 /*
200 * Increase the height by adding nodes at the root until
201 * there is sufficient space.
202 */
203 while (rtree->rt_height == 0 ||
204 index > VM_RADIX_MAX(rtree->rt_height)) {
235 while (level == 0 || index > VM_RADIX_MAX(level)) {
205 CTR3(KTR_VM, "insert: expanding %jd > %jd height %d",
206 index, VM_RADIX_MAX(rtree->rt_height), rtree->rt_height);
237 index, VM_RADIX_MAX(level), level);
238 level++;
239 KASSERT(level <= VM_RADIX_LIMIT,
240 ("vm_radix_insert: Tree %p height %d too tall",
241 rtree, level));
207 /*
208 * Only allocate tree nodes if they are needed.
209 */
210 if (rtree->rt_root == NULL || rtree->rt_root->rn_count != 0) {
245 if (root == NULL || root->rn_count != 0) {
211 rnode = vm_radix_node_get();
212 if (rnode == NULL)
213 return (ENOMEM);
214 if (rtree->rt_root) {
215 rnode->rn_child[0] = rtree->rt_root;
249 /*
250 * Store the new pointer with a memory barrier so
251 * that it is visible before the new root.
252 */
253 if (root) {
254 atomic_store_rel_ptr((volatile uintptr_t *)
255 &rnode->rn_child[0], (uintptr_t)root);
216 rnode->rn_count = 1;
217 }
218 rtree->rt_root = rnode;
258 root = rnode;
219 }
220 rtree->rt_height++;
221 KASSERT(rtree->rt_height <= VM_RADIX_LIMIT,
222 ("vm_radix_insert: Tree %p height %d too tall", rtree,
223 rtree->rt_height));
260 vm_radix_setroot(rtree, root, level);
224 }
225
226 /* Now that the tree is tall enough, fill in the path to the index. */
227 rnode = rtree->rt_root;
228 for (level = rtree->rt_height - 1; level > 0; level--) {
264 rnode = root;
265 for (level = level - 1; level > 0; level--) {
229 slot = vm_radix_slot(index, level);
 230 /* Add the required intermediate nodes. */
231 if (rnode->rn_child[slot] == NULL) {
232 rnode->rn_child[slot] = vm_radix_node_get();
233 if (rnode->rn_child[slot] == NULL)
234 return (ENOMEM);
235 rnode->rn_count++;
236 }
237 CTR5(KTR_VM,
238 "insert: tree %p, index %p, level %d, slot %d, child %p",
239 rtree, (void *)index, level, slot, rnode->rn_child[slot]);
240 rnode = rnode->rn_child[slot];
241 }
242
243 slot = vm_radix_slot(index, level);
244 CTR5(KTR_VM, "insert: tree %p, index %p, level %d, slot %d, child %p",
245 rtree, (void *)index, level, slot, rnode->rn_child[slot]);
246 KASSERT(rnode->rn_child[slot] == NULL,
247 ("vm_radix_insert: Duplicate value %p at index: %lu\n",
248 rnode->rn_child[slot], (u_long)index));
249 rnode->rn_child[slot] = val;
250 rnode->rn_count++;
251
252 return 0;
253}
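The growth loop at the top of vm_radix_insert() keeps adding root levels until VM_RADIX_MAX(level) covers the index being inserted. The sizing rule can be modelled in isolation; EX_RADIX_MAX() below is an assumed equivalent of the vm_radix.h macro, built the way the code uses it (a mask of the low level * VM_RADIX_WIDTH bits):

#include <stdio.h>
#include <stdint.h>

#define EX_RADIX_WIDTH  4
#define EX_RADIX_MAX(h) ((UINT64_C(1) << ((h) * EX_RADIX_WIDTH)) - 1)

/* Mirror of the while loop: smallest height whose range covers 'index'. */
static int
ex_height_for(uint64_t index)
{
        int level = 0;

        while (level == 0 || index > EX_RADIX_MAX(level))
                level++;
        return (level);
}

int
main(void)
{
        printf("index 0x0    needs height %d\n", ex_height_for(0x0));
        printf("index 0xfff  needs height %d\n", ex_height_for(0xfff));
        printf("index 0x1000 needs height %d\n", ex_height_for(0x1000));
        return (0);
}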
254
255/*
256 * Returns the value stored at the index. If the index is not present
257 * NULL is returned.
258 */
259void *
260vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
261{
262 struct vm_radix_node *rnode;
263 int slot;
264 int level;
265
266 if (index > VM_RADIX_MAX(rtree->rt_height))
303 level = vm_radix_height(rtree, &rnode);
304 if (index > VM_RADIX_MAX(level))
267 return NULL;
268 level = rtree->rt_height - 1;
269 rnode = rtree->rt_root;
306 level--;
270 while (rnode) {
271 slot = vm_radix_slot(index, level);
272 CTR5(KTR_VM,
273 "lookup: tree %p, index %p, level %d, slot %d, child %p",
274 rtree, (void *)index, level, slot, rnode->rn_child[slot]);
275 if (level == 0)
276 return rnode->rn_child[slot];
277 rnode = rnode->rn_child[slot];
278 level--;
279 }
280 CTR2(KTR_VM, "lookup: tree %p, index %p failed", rtree, (void *)index);
281
282 return NULL;
283}
284
285/*
286 * Looks up as many as cnt values between start and end inclusive, and stores
 287 * them in the caller-allocated array out. The next index can be used to
288 * restart the scan. This optimizes forward scans in the tree.
289 */
290int
291vm_radix_lookupn(struct vm_radix *rtree, vm_pindex_t start,
292 vm_pindex_t end, void **out, int cnt, vm_pindex_t *next)
293{
294 struct vm_radix_node *rnode;
295 struct vm_radix_node *child;
296 vm_pindex_t max;
297 vm_pindex_t inc;
298 int slot;
299 int level;
300 void *val;
301 int outidx;
302 int loops = 0;
303
304 CTR3(KTR_VM, "lookupn: tree %p, start %p, end %p",
305 rtree, (void *)start, (void *)end);
306 outidx = 0;
307 max = VM_RADIX_MAX(rtree->rt_height);
344restart:
345 level = vm_radix_height(rtree, &rnode);
346 max = VM_RADIX_MAX(level);
308 if (start > max)
309 return 0;
348 goto out;
310 if (end > max || end == 0)
311 end = max;
312restart:
313 loops++;
314 if (loops > 1000)
315 panic("vm_radix_lookupn: looping %d\n", loops);
316 /*
317 * Search the tree from the top for any leaf node holding an index
318 * between start and end.
319 */
320 level = rtree->rt_height - 1;
321 rnode = rtree->rt_root;
358 level--;
322 while (rnode) {
323 slot = vm_radix_slot(start, level);
324 CTR5(KTR_VM,
325 "lookupn: tree %p, index %p, level %d, slot %d, child %p",
326 rtree, (void *)start, level, slot, rnode->rn_child[slot]);
327 if (level == 0)
328 break;
329 /*
330 * If we don't have an exact match we must start our search
331 * from the next leaf and adjust our index appropriately.
332 */
333 if ((child = rnode->rn_child[slot]) == NULL) {
334 /*
335 * Calculate how much to increment our index by
336 * based on the tree level. We must truncate the
 337 * lower bits to start from the beginning of the next
338 * leaf.
339 */
340 inc = 1LL << (level * VM_RADIX_WIDTH);
341 start &= ~VM_RADIX_MAX(level);
342 start += inc;
343 slot++;
344 CTR5(KTR_VM,
345 "lookupn: start %p end %p inc %d mask 0x%lX slot %d",
346 (void *)start, (void *)end, inc, ~VM_RADIX_MAX(level), slot);
347 for (; slot < VM_RADIX_COUNT && start <= end;
348 slot++, start += inc) {
349 child = rnode->rn_child[slot];
350 if (child)
351 break;
352 }
353 }
354 rnode = child;
355 level--;
356 }
357 if (rnode == NULL) {
358 /*
359 * If we still have another range to search, start looking
360 * from the next node. Otherwise, return what we've already
361 * found. The loop above has already adjusted start to the
362 * beginning of the next node.
363 *
364 * Detect start wrapping back to 0 and return in this case.
365 */
366 if (start <= end && start != 0)
367 goto restart;
368 goto out;
369 }
370 for (; outidx < cnt && slot < VM_RADIX_COUNT && start <= end;
371 slot++, start++) {
372 val = rnode->rn_child[slot];
373 if (val == NULL)
374 continue;
375 out[outidx++] = val;
376 }
377 /*
 378 * Go fetch the next page to fill the requested number of pages;
379 * otherwise the caller will simply call us again to fulfill the
380 * same request after the structures are pushed out of cache.
381 */
382 if (outidx < cnt && start <= end)
383 goto restart;
384
385out:
386 *next = start;
387
388 return (outidx);
389}
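When vm_radix_lookupn() finds an empty slot it skips 'start' ahead to the first index of the next subtree at that level: clear the low bits the level covers, then add the level's stride. A small sketch of just that index arithmetic, using the same assumed stand-ins for the vm_radix.h macros as above:

#include <stdio.h>
#include <stdint.h>

#define EX_RADIX_WIDTH  4
#define EX_RADIX_MAX(h) ((UINT64_C(1) << ((h) * EX_RADIX_WIDTH)) - 1)

/* First index of the next subtree at 'level', as in the lookupn loop. */
static uint64_t
ex_next_subtree(uint64_t start, int level)
{
        uint64_t inc = UINT64_C(1) << (level * EX_RADIX_WIDTH);

        start &= ~EX_RADIX_MAX(level);  /* back to this subtree's base */
        return (start + inc);           /* then step over it */
}

int
main(void)
{
        /* 0x2a5f -> 0x2a60 at level 1, 0x2b00 at level 2. */
        printf("level 1: %#llx\n",
            (unsigned long long)ex_next_subtree(0x2a5f, 1));
        printf("level 2: %#llx\n",
            (unsigned long long)ex_next_subtree(0x2a5f, 2));
        return (0);
}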
390
391/*
392 * Look up any entry at a position less than or equal to index.
393 */
394void *
395vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
396{
397 struct vm_radix_node *rnode;
398 struct vm_radix_node *child;
399 vm_pindex_t max;
400 vm_pindex_t inc;
401 int slot;
402 int level;
403 int loops = 0;
404
405 CTR2(KTR_VM,
406 "lookup_le: tree %p, index %p", rtree, (void *)index);
407 if (rtree->rt_root == NULL)
444restart:
445 level = vm_radix_height(rtree, &rnode);
446 if (rnode == NULL)
408 return (NULL);
409 max = VM_RADIX_MAX(rtree->rt_height);
448 max = VM_RADIX_MAX(level);
410 if (index > max || index == 0)
411 index = max;
412restart:
413 loops++;
414 if (loops > 1000)
 415 panic("vm_radix_lookup_le: looping %d\n", loops);
416 /*
417 * Search the tree from the top for any leaf node holding an index
418 * lower than 'index'.
419 */
420 level = rtree->rt_height - 1;
421 rnode = rtree->rt_root;
458 level--;
422 while (rnode) {
423 slot = vm_radix_slot(index, level);
424 CTR5(KTR_VM,
425 "lookup_le: tree %p, index %p, level %d, slot %d, child %p",
426 rtree, (void *)index, level, slot, rnode->rn_child[slot]);
427 if (level == 0)
428 break;
429 /*
430 * If we don't have an exact match we must start our search
431 * from the next leaf and adjust our index appropriately.
432 */
433 if ((child = rnode->rn_child[slot]) == NULL) {
434 /*
435 * Calculate how much to decrement our index by
436 * based on the tree level. We must set the
437 * lower bits to start from the end of the next
438 * leaf.
439 */
440 inc = 1LL << (level * VM_RADIX_WIDTH);
441 index |= VM_RADIX_MAX(level);
442 index -= inc;
443 slot--;
444 CTR4(KTR_VM,
445 "lookup_le: start %p inc %ld mask 0x%lX slot %d",
446 (void *)index, inc, VM_RADIX_MAX(level), slot);
447 for (; slot >= 0; slot--, index -= inc) {
448 child = rnode->rn_child[slot];
449 if (child)
450 break;
451 }
452 }
453 rnode = child;
454 level--;
455 }
456 if (rnode) {
457 for (; slot >= 0; slot--, index--) {
458 if (rnode->rn_child[slot])
459 return (rnode->rn_child[slot]);
460 }
461 }
462 if (index != -1)
463 goto restart;
464 return (NULL);
465}
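vm_radix_lookup_le() makes the mirror-image adjustment when scanning backwards: set the low bits the level covers (the last index under the current subtree) and subtract the stride to land on the end of the previous one. A sketch under the same assumptions:

#include <stdio.h>
#include <stdint.h>

#define EX_RADIX_WIDTH  4
#define EX_RADIX_MAX(h) ((UINT64_C(1) << ((h) * EX_RADIX_WIDTH)) - 1)

/* Last index of the previous subtree at 'level', as in lookup_le. */
static uint64_t
ex_prev_subtree_end(uint64_t index, int level)
{
        uint64_t inc = UINT64_C(1) << (level * EX_RADIX_WIDTH);

        index |= EX_RADIX_MAX(level);   /* last index under this subtree */
        return (index - inc);           /* then the one just before it */
}

int
main(void)
{
        /* 0x2a5f -> 0x2a4f at level 1, 0x29ff at level 2. */
        printf("level 1: %#llx\n",
            (unsigned long long)ex_prev_subtree_end(0x2a5f, 1));
        printf("level 2: %#llx\n",
            (unsigned long long)ex_prev_subtree_end(0x2a5f, 2));
        return (0);
}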
466
467/*
468 * Remove the specified index from the tree. If possible the height of the
 469 * tree is adjusted after deletion. The value stored at index is returned;
470 * panics if the key is not present.
471 */
472void *
473vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
474{
475 struct vm_radix_node *stack[VM_RADIX_LIMIT];
476 struct vm_radix_node *rnode;
513 struct vm_radix_node *rnode, *root;
477 void *val;
478 int level;
479 int slot;
480
481 KASSERT(index <= VM_RADIX_MAX(rtree->rt_height),
518 level = vm_radix_height(rtree, &root);
519 KASSERT(index <= VM_RADIX_MAX(level),
482 ("vm_radix_remove: %p index %jd out of range %jd.",
483 rtree, index, VM_RADIX_MAX(rtree->rt_height)));
521 rtree, index, VM_RADIX_MAX(level)));
522 rnode = root;
484 val = NULL;
485 rnode = rtree->rt_root;
486 level = rtree->rt_height - 1;
524 level--;
487 /*
488 * Find the node and record the path in stack.
489 */
490 while (level && rnode) {
491 stack[level] = rnode;
492 slot = vm_radix_slot(index, level);
493 rnode = rnode->rn_child[slot];
494 CTR5(KTR_VM,
495 "remove: tree %p, index %p, level %d, slot %d, child %p",
496 rtree, (void *)index, level, slot, rnode->rn_child[slot]);
497 level--;
498 }
499 slot = vm_radix_slot(index, 0);
500 KASSERT(rnode != NULL && rnode->rn_child[slot] != NULL,
501 ("vm_radix_remove: index %jd not present in the tree.\n", index));
502
503 val = rnode->rn_child[slot];
504 for (;;) {
505 rnode->rn_child[slot] = NULL;
506 rnode->rn_count--;
507 if (rnode->rn_count > 0)
508 break;
509 vm_radix_node_put(rnode);
510 if (rnode == rtree->rt_root) {
511 rtree->rt_root = NULL;
512 rtree->rt_height = 0;
548 if (rnode == root) {
549 vm_radix_setroot(rtree, NULL, 0);
513 break;
514 }
515 rnode = stack[++level];
516 slot = vm_radix_slot(index, level);
517
518 }
519 return (val);
520}
521
522/*
523 * Attempts to reduce the height of the tree.
524 */
525void
526vm_radix_shrink(struct vm_radix *rtree)
527{
528 struct vm_radix_node *tmp;
565 struct vm_radix_node *tmp, *root;
566 int level;
529
530 if (rtree->rt_root == NULL)
568 if (rtree->rt_root == 0)
531 return;
570 level = vm_radix_height(rtree, &root);
532
533 /* Adjust the height of the tree. */
534 while (rtree->rt_root->rn_count == 1 &&
535 rtree->rt_root->rn_child[0] != NULL) {
536 tmp = rtree->rt_root;
537 rtree->rt_root = tmp->rn_child[0];
538 rtree->rt_height--;
539 tmp->rn_count--;
573 while (root->rn_count == 1 && root->rn_child[0] != NULL) {
574 tmp = root;
575 root->rn_count--;
576 root = root->rn_child[0];
577 level--;
540 vm_radix_node_put(tmp);
541 }
542 /* Finally see if we have an empty tree. */
543 if (rtree->rt_root->rn_count == 0) {
544 vm_radix_node_put(rtree->rt_root);
545 rtree->rt_root = NULL;
546 rtree->rt_height = 0;
581 if (root->rn_count == 0) {
582 vm_radix_node_put(root);
583 root = NULL;
584 level--;
547 }
586 vm_radix_setroot(rtree, root, level);
548}