vm_radix.c: full diff between head/sys/vm/vm_radix.c r249502 and r249605
(deleted lines are marked "-", added lines "+"; all other lines are unchanged)
/*
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/vm/vm_radix.c 249502 2013-04-15 06:12:00Z alc $");
+__FBSDID("$FreeBSD: head/sys/vm/vm_radix.c 249605 2013-04-18 05:34:33Z alc $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH) - 1)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((VM_RADIX_LIMIT - (lev)) * VM_RADIX_WIDTH))

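/*
 * Illustrative sketch (not part of the committed file): assuming the usual
 * 64-bit vm_pindex_t on an __LP64__ platform, these macros work out to
 *
 *	VM_RADIX_WIDTH == 4, VM_RADIX_COUNT == 16, VM_RADIX_MASK == 0xf,
 *	VM_RADIX_LIMIT == howmany(64, 4) - 1 == 15,
 *
 * so a key is consumed as 16 nibbles, level 0 being the most significant,
 * and VM_RADIX_UNITLEVEL(15) == 1 while VM_RADIX_UNITLEVEL(0) == 1ULL << 60.
 */
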
struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.  Pre-allocation should ensure that the request
 * will always be satisfied.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);

	/*
	 * The required number of nodes should already be pre-allocated
	 * by vm_radix_prealloc().  However, UMA can hold a few nodes
	 * in per-CPU buckets, which will not be accessible by the
	 * current CPU.  Thus, the allocation could return NULL when
	 * the pre-allocated pool is close to exhaustion.  Anyway,
	 * in practice this should never occur because a new node
	 * is not always required for insert.  Thus, the pre-allocated
	 * pool should have some extra pages that prevent this from
	 * becoming a problem.
	 */
	if (rnode == NULL)
		panic("%s: uma_zalloc() returned NULL for a new node",
		    __func__);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> ((VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH)) &
	    VM_RADIX_MASK);
}

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level < VM_RADIX_LIMIT) {
		ret >>= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
		ret <<= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
	}
	return (ret);
}

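/*
 * Worked example (illustrative, using the 64-bit layout sketched above):
 * for index == 0x12f0,
 *
 *	vm_radix_slot(0x12f0, 15) == 0x0	(least significant nibble)
 *	vm_radix_slot(0x12f0, 14) == 0xf
 *	vm_radix_trimkey(0x12f0, 13) == 0x1200	(levels 14 and 15 cleared)
 *
 * i.e. vm_radix_trimkey() keeps the key prefix down to the given level and
 * zeroes every nibble below it.
 */
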
/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

/*
 * Returns the slot where two keys differ.
 * It cannot accept 2 equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = 0; clev <= VM_RADIX_LIMIT; clev++)
		if (vm_radix_slot(index1, clev))
			return (clev);
	panic("%s: cannot reach this point", __func__);
	return (0);
}

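/*
 * For instance (illustrative sketch): vm_radix_keydiff(0x10, 0x20) XORs the
 * keys to 0x30 and scans from the most significant nibble down, so with
 * VM_RADIX_LIMIT == 15 the first nonzero slot is found at level 14, the
 * level at which the two keys first diverge.
 */
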
/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev > 0) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev - 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}

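/*
 * Example (illustrative): a node created with clev == 14 stores
 * rn_owner == vm_radix_trimkey(index, 13), i.e. the key prefix above its
 * level.  For owner 0x1200, vm_radix_keybarr() is FALSE for any index in
 * [0x1200, 0x12ff] and TRUE for, say, 0x1300, which lies outside the
 * subtree rooted at that node.
 */
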
/*
 * Adjusts the idx key to the first upper level available, based on a valid
 * initial level and a map of available levels.
 * Returns a value greater than 0 when no valid level is available, or when
 * the adjusted key wraps around the key space.
 */
static __inline int
vm_radix_addlev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1); ilev--)
		if (ilev == 0)
			return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx += VM_RADIX_UNITLEVEL(ilev);
	return (*idx < wrapidx);
}

/*
 * Adjusts the idx key to the first lower level available, based on a valid
 * initial level and a map of available levels.
 * Returns a value greater than 0 when no valid level is available, or when
 * the adjusted key wraps around the key space.
 */
static __inline int
vm_radix_declev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == 0; ilev--)
		if (ilev == 0)
			return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx |= VM_RADIX_UNITLEVEL(ilev) - 1;
	*idx -= VM_RADIX_UNITLEVEL(ilev);
	return (*idx > wrapidx);
}

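/*
 * Worked example (illustrative): with idx == 0x12ff and every level marked
 * available, vm_radix_addlev(&idx, levels, 15) skips levels 15 and 14, whose
 * slots are already at VM_RADIX_COUNT - 1, stops at level 13, trims idx to
 * 0x1200 and adds VM_RADIX_UNITLEVEL(13) == 0x100, yielding 0x1300: the
 * smallest key strictly above the exhausted 0x12xx range.
 */
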
/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

/*
 * Radix node zone initializer.
 */
static int
vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct vm_radix_node *rnode;

	rnode = mem;
	memset(rnode->rn_child, 0, sizeof(rnode->rn_child));
	return (0);
}

/*
 * Pre-allocate intermediate nodes from the UMA slab zone.
 */
static void
vm_radix_prealloc(void *arg __unused)
{
-
-	if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
-		panic("%s: unable to create new zone", __func__);
-	uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
+	int nodes;
+
+	/*
+	 * Calculate the number of reserved nodes, discounting the pages that
+	 * are needed to store them.
+	 */
+	nodes = ((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
+	    sizeof(struct vm_radix_node));
+	if (!uma_zone_reserve_kva(vm_radix_node_zone, nodes))
+		panic("%s: unable to create new zone", __func__);
+	uma_prealloc(vm_radix_node_zone, nodes);
}
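
/*
 * Arithmetic sketch for the r249605 change (illustrative; assumes
 * PAGE_SIZE == 4096 and sizeof(struct vm_radix_node) == 144 on __LP64__):
 * reserving one node per physical page would itself consume pages, so the
 * count is scaled down by solving
 *
 *	nodes * (PAGE_SIZE + sizeof(struct vm_radix_node)) ==
 *	    cnt.v_page_count * PAGE_SIZE,
 *
 * which gives roughly 0.966 * cnt.v_page_count nodes instead of the full
 * cnt.v_page_count reserved before the change.
 */
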
SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
    NULL);

/*
 * Initialize the UMA slab zone.
 * Until vm_radix_prealloc() is called, the zone will be served by the
 * UMA boot-time pre-allocated pool of pages.
 */
void
vm_radix_init(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM |
	    UMA_ZONE_NOFREE);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
void
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return;
	}
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev - 1), 2, clev);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return;
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return;
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level was
	 * reached.  Set up the new intermediate node and add the 2 children:
	 * the new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev - 1), 2,
	    clev);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			else
				break;
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}

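/*
 * Minimal usage sketch (illustrative, not part of the original file): the
 * trie maps a vm_pindex_t to a vm_page_t, and a zeroed struct vm_radix is
 * an empty trie (vm_radix_getroot() treats a NULL root as empty), so a
 * consumer such as vm_object would do roughly
 *
 *	struct vm_radix rtree = { 0 };
 *
 *	vm_radix_insert(&rtree, m);		(keyed on m->pindex)
 *	m = vm_radix_lookup(&rtree, pindex);
 *	vm_radix_remove(&rtree, pindex);
 *
 * with the caller providing the synchronization, e.g. the object lock.
 */
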
/*
 * Look up the nearest entry at a position bigger than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	for (;;) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * the search key might roll back to the earliest
		 * available bisection node, or to the smallest value
		 * in the current domain (if the owner is bigger than the
		 * search key).
		 * The maplevels array records whether a node has been seen
		 * at a given level.  This aids the search for a valid
		 * bisection node.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				if (vm_radix_addlev(&index, maplevels,
				    difflev) > 0)
					break;
			} else
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
			rnode = vm_radix_getroot(rtree);
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a valid page or edge bigger than the search slot was
		 * not found in the traversal, skip to the next key at a
		 * higher level.
		 */
		if (rnode->rn_clev == 0 || vm_radix_addlev(&index, maplevels,
		    rnode->rn_clev - 1) > 0)
			break;
		rnode = vm_radix_getroot(rtree);
		goto restart;
descend:
		rnode = child;
	}
	return (NULL);
}

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	for (;;) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * the search key might roll back to the earliest
		 * available bisection node, or to the largest value
		 * in the current domain (if the owner is smaller than the
		 * search key).
		 * The maplevels array records whether a node has been seen
		 * at a given level.  This aids the search for a valid
		 * bisection node.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
				index |= VM_RADIX_UNITLEVEL(difflev) - 1;
			} else if (vm_radix_declev(&index, maplevels,
			    difflev) > 0)
				break;
			rnode = vm_radix_getroot(rtree);
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a valid page or edge smaller than the search slot was
		 * not found in the traversal, skip to the previous key at a
		 * higher level.
		 */
		if (rnode->rn_clev == 0 || vm_radix_declev(&index, maplevels,
		    rnode->rn_clev - 1) > 0)
			break;
		rnode = vm_radix_getroot(rtree);
		goto restart;
descend:
		rnode = child;
	}
	return (NULL);
}

/*
 * Remove the specified index from the tree.
 * Panics if the key is not present.
 */
void
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: invalid key found", __func__);
		vm_radix_setroot(rtree, NULL);
		return;
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			panic("vm_radix_remove: impossible to locate the key");
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				panic("%s: invalid key found", __func__);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				break;
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			break;
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

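/*
 * Illustrative note: when a removal leaves an intermediate node with a
 * single child, the code above splices that child into the parent's slot
 * and frees the node, which is what keeps the trie path-compressed; e.g. a
 * chain root -> node -> {leaf A, leaf B} collapses to root -> leaf A once
 * leaf B is removed.
 */
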
/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but the recursion is tightly bounded because
 * the maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    vm_radix_isleaf(rnode->rn_child[i]) ?
			    vm_radix_topage(rnode->rn_child[i]) : NULL,
			    rnode->rn_clev);
}
#endif /* DDB */