/*
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible.
 * - There is no bias toward lookup operations over inserts or removes,
 *   and vice-versa.
 * - On average there are not many complete levels, so level
 *   compression may just complicate things.
 */

#include <sys/cdefs.h>

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif

#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH) - 1)

/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((VM_RADIX_LIMIT - (lev)) * VM_RADIX_WIDTH))
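
/*
 * Illustrative numbers only: on an LP64 system VM_RADIX_WIDTH is 4, so
 * VM_RADIX_COUNT is 16, VM_RADIX_MASK is 0xf and, since vm_pindex_t is
 * 64 bits wide, VM_RADIX_LIMIT is 15.  A key is thus consumed one nibble
 * at a time, most significant first: VM_RADIX_UNITLEVEL(0) is 1 << 60,
 * while VM_RADIX_UNITLEVEL(VM_RADIX_LIMIT) is 1.
 */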

struct vm_radix_node {
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
};

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.  Pre-allocation should ensure that the request
 * will always be satisfied.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);

	/*
	 * The required number of nodes should already be pre-allocated
	 * by vm_radix_prealloc().  However, UMA can hold a few nodes
	 * in per-CPU buckets, which will not be accessible by the
	 * current CPU.  Thus, the allocation could return NULL when
	 * the pre-allocated pool is close to exhaustion.  In practice
	 * this should never occur because a new node is not always
	 * required for an insert.  Thus, the pre-allocated pool should
	 * have some extra pages that prevent this from becoming a
	 * problem.
	 */
	if (rnode == NULL)
		panic("%s: uma_zalloc() returned NULL for a new node",
		    __func__);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}

/*
 * Free a radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> ((VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH)) &
	    VM_RADIX_MASK);
}

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level < VM_RADIX_LIMIT) {
		ret >>= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
		ret <<= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
	}
	return (ret);
}
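
/*
 * Worked example (illustrative, using the LP64 parameters): for index
 * 0x1234 and level VM_RADIX_LIMIT - 2, vm_radix_slot() shifts right by
 * 2 * VM_RADIX_WIDTH == 8 bits and masks, giving slot 0x2, while
 * vm_radix_trimkey() clears everything below those bits, giving 0x1200.
 */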

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)(rtree->rt_root & ~VM_RADIX_FLAGS));
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns the associated page extracted from rnode if available,
 * and NULL otherwise.
 */
static __inline vm_page_t
vm_radix_node_page(struct vm_radix_node *rnode)
{

	return ((((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0) ?
	    (vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS) : NULL);
}

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

/*
 * Returns the level where two keys differ.
 * The two keys must not be equal.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = 0; clev <= VM_RADIX_LIMIT; clev++)
		if (vm_radix_slot(index1, clev))
			return (clev);
	panic("%s: cannot reach this point", __func__);
	return (0);
}

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev > 0) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev - 1);
		idx -= rnode->rn_owner;
		if (idx != 0)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Adjusts the idx key to the first upper level available, based on a valid
 * initial level and map of available levels.
 * Returns a value bigger than 0 to signal that there are no valid levels
 * available.
 */
static __inline int
vm_radix_addlev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1); ilev--)
		if (ilev == 0)
			break;
	KASSERT(ilev > 0 || levels[0],
	    ("%s: levels back-scanning problem", __func__));
	if (ilev == 0 && vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1))
		return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx += VM_RADIX_UNITLEVEL(ilev);
	return (*idx < wrapidx);
}

/*
 * Adjusts the idx key to the first lower level available, based on a valid
 * initial level and map of available levels.
 * Returns a value bigger than 0 to signal that there are no valid levels
 * available.
 */
static __inline int
vm_radix_declev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
	vm_pindex_t wrapidx;

	for (; levels[ilev] == FALSE ||
	    vm_radix_slot(*idx, ilev) == 0; ilev--)
		if (ilev == 0)
			break;
	KASSERT(ilev > 0 || levels[0],
	    ("%s: levels back-scanning problem", __func__));
	if (ilev == 0 && vm_radix_slot(*idx, ilev) == 0)
		return (1);
	wrapidx = *idx;
	*idx = vm_radix_trimkey(*idx, ilev);
	*idx |= VM_RADIX_UNITLEVEL(ilev) - 1;
	*idx -= VM_RADIX_UNITLEVEL(ilev);
	return (*idx > wrapidx);
}
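
/*
 * Worked example for the level adjusters (illustrative, LP64 parameters):
 * calling vm_radix_addlev() with *idx == 0x10f and ilev ==
 * VM_RADIX_LIMIT - 1, where levels[VM_RADIX_LIMIT - 1] is TRUE and the
 * slot at that level (0x0) is not the last one, trims the key to 0x100
 * and adds VM_RADIX_UNITLEVEL(VM_RADIX_LIMIT - 1) == 0x10, yielding
 * 0x110.  vm_radix_declev() performs the symmetric downward step.
 */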

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	for (slot = 0; slot < VM_RADIX_COUNT && rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (vm_radix_node_page(rnode->rn_child[slot]) == NULL)
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}

#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: Freeing node %p with %d children\n", mem,
	    rnode->rn_count));
}
#endif

/*
 * Pre-allocate intermediate nodes from the UMA slab zone.
 */
static void
vm_radix_prealloc(void *arg __unused)
{

	if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
		panic("%s: unable to create new zone", __func__);
	uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
}
SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
    NULL);

/*
 * Initialize the UMA slab zone.
 * Until vm_radix_prealloc() is called, the zone will be served by the
 * UMA boot-time pre-allocated pool of pages.
 */
void
vm_radix_init(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
void
vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, vm_page_t page)
{
	vm_pindex_t newind;
	struct vm_radix_node *rnode, *tmp, *tmp2;
	vm_page_t m;
	int slot;
	uint16_t clev;

	/*
	 * The owner of record for the root is not really important because
	 * it will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rnode = vm_radix_node_get(0, 1, 0);
		vm_radix_setroot(rtree, rnode);
		vm_radix_addpage(rnode, index, 0, page);
		return;
	}
	while (rnode != NULL) {
		if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		m = vm_radix_node_page(rnode->rn_child[slot]);
		if (m != NULL) {
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev - 1), 2, clev);
			rnode->rn_child[slot] = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return;
		}
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return;
		}
		rnode = rnode->rn_child[slot];
	}
	if (rnode == NULL)
		panic("%s: path traversal ended unexpectedly", __func__);
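
	/*
	 * rnode is now the first node whose compressed path diverges from
	 * index.  The nodes store no parent pointers, so the insertion
	 * point for the new leaf can only be found by walking down from
	 * the root again.
	 */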

	/*
	 * Scan the trie from the top and find the parent to insert
	 * the new object.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	slot = VM_RADIX_COUNT;
	for (rnode = vm_radix_getroot(rtree); ; rnode = tmp) {
		KASSERT(rnode != NULL, ("%s: edge cannot be NULL in the scan",
		    __func__));
		KASSERT(clev >= rnode->rn_clev,
		    ("%s: unexpected trie depth: clev: %d, rnode->rn_clev: %d",
		    __func__, clev, rnode->rn_clev));
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = rnode->rn_child[slot];
		KASSERT(tmp != NULL && vm_radix_node_page(tmp) == NULL,
		    ("%s: unexpected lookup interruption", __func__));
		if (tmp->rn_clev > clev)
			break;
	}
	KASSERT(rnode != NULL && tmp != NULL && slot < VM_RADIX_COUNT,
	    ("%s: invalid scan parameters rnode: %p, tmp: %p, slot: %d",
	    __func__, (void *)rnode, (void *)tmp, slot));

	/*
	 * A new node is needed because the right insertion level has been
	 * reached.  Set up the new intermediate node and add the two
	 * children: the new object and the older edge.
	 */
	tmp2 = vm_radix_node_get(vm_radix_trimkey(page->pindex, clev - 1), 2,
	    clev);
	rnode->rn_child[slot] = tmp2;
	vm_radix_addpage(tmp2, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp2->rn_child[slot] = tmp;
}

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_keybarr(rnode, index))
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
		m = vm_radix_node_page(rnode);
		if (m != NULL) {
			if (m->pindex == index)
				return (m);
			else
				return (NULL);
		}
	}
	return (NULL);
}
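
/*
 * The two ordered lookups below share one strategy: descend as for an
 * exact lookup and, when the wanted leaf is missing, either scan the
 * sibling slots of the current node or adjust the search key to a
 * neighboring subtree and restart from the root.  Illustrative example
 * (LP64 parameters): with pages at indices 0x100 and 0x200 under a
 * common node, vm_radix_lookup_ge(rtree, 0x150) first reaches the leaf
 * for 0x100, which fails the "pindex >= 0x150" test, and the sibling
 * scan then advances one slot and returns the page at 0x200.
 */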

/*
 * Look up the nearest entry at a position bigger than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node, or to the smallest value
		 * in the current domain (if the owner is bigger than the
		 * search key).  The maplevels array aids the search for
		 * a valid bisection node by immediately providing a lower
		 * useful level, skipping any holes.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				if (vm_radix_addlev(&index, maplevels,
				    difflev) > 0)
					break;
			} else
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		m = vm_radix_node_page(rnode->rn_child[slot]);
		if (m != NULL && m->pindex >= index)
			return (m);
		if (rnode->rn_child[slot] != NULL && m == NULL) {
			rnode = rnode->rn_child[slot];
			continue;
		}

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			index += inc;
			slot++;
			for (;; index += inc, slot++) {
				m = vm_radix_node_page(rnode->rn_child[slot]);
				if (m != NULL && m->pindex >= index)
					return (m);
				if ((rnode->rn_child[slot] != NULL &&
				    m == NULL) || slot == (VM_RADIX_COUNT - 1))
					break;
			}
		}

		/*
		 * If no valid page or edge bigger than the search slot was
		 * found in the traversal, skip to the next higher-level key.
		 */
		if (slot == (VM_RADIX_COUNT - 1) &&
		    (rnode->rn_child[slot] == NULL || m != NULL)) {
			if (rnode->rn_clev == 0 || vm_radix_addlev(&index,
			    maplevels, rnode->rn_clev - 1) > 0)
				break;
			goto restart;
		}
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}
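
/*
 * vm_radix_lookup_le() below mirrors vm_radix_lookup_ge(): sibling slots
 * are scanned downward rather than upward, and vm_radix_declev() takes
 * the place of vm_radix_addlev() when a key barrier forces the search
 * key back.
 */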

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *rnode;
	int slot;
	uint16_t difflev;
	boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
	int loops = 0;
#endif

restart:
	KASSERT(++loops < 1000, ("%s: too many loops", __func__));
	for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
		maplevels[difflev] = FALSE;
	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		maplevels[rnode->rn_clev] = TRUE;

		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node, or to the highest value
		 * in the current domain (if the owner is smaller than the
		 * search key).  The maplevels array aids the search for
		 * a valid bisection node by immediately providing a lower
		 * useful level, skipping any holes.
		 */
		if (vm_radix_keybarr(rnode, index)) {
			difflev = vm_radix_keydiff(index, rnode->rn_owner);
			if (index > rnode->rn_owner) {
				index = vm_radix_trimkey(rnode->rn_owner,
				    difflev);
				index |= VM_RADIX_UNITLEVEL(difflev) - 1;
			} else if (vm_radix_declev(&index, maplevels,
			    difflev) > 0)
				break;
			goto restart;
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		m = vm_radix_node_page(rnode->rn_child[slot]);
		if (m != NULL && m->pindex <= index)
			return (m);
		if (rnode->rn_child[slot] != NULL && m == NULL) {
			rnode = rnode->rn_child[slot];
			continue;
		}

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			index |= inc - 1;
			index -= inc;
			slot--;
			for (;; index -= inc, slot--) {
				m = vm_radix_node_page(rnode->rn_child[slot]);
				if (m != NULL && m->pindex <= index)
					return (m);
				if ((rnode->rn_child[slot] != NULL &&
				    m == NULL) || slot == 0)
					break;
			}
		}

		/*
		 * If no valid page or edge smaller than the search slot was
		 * found in the traversal, skip to the next higher-level key.
		 */
		if (slot == 0 && (rnode->rn_child[slot] == NULL || m != NULL)) {
			if (rnode->rn_clev == 0 || vm_radix_declev(&index,
			    maplevels, rnode->rn_clev - 1) > 0)
				break;
			goto restart;
		}
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}

/*
 * Remove the specified index from the tree.
 * Panics if the key is not present.
 */
void
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	parent = NULL;
	rnode = vm_radix_getroot(rtree);
	for (;;) {
		if (rnode == NULL)
			panic("vm_radix_remove: impossible to locate the key");
		slot = vm_radix_slot(index, rnode->rn_clev);
		m = vm_radix_node_page(rnode->rn_child[slot]);
		if (m != NULL && m->pindex == index) {
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				break;
			if (parent == NULL) {
				if (rnode->rn_count == 0) {
					vm_radix_node_put(rnode);
					vm_radix_setroot(rtree, NULL);
				}
				break;
			}
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			slot = vm_radix_slot(index, parent->rn_clev);
			KASSERT(parent->rn_child[slot] == rnode,
			    ("%s: invalid child value", __func__));
			parent->rn_child[slot] = rnode->rn_child[i];
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			break;
		}
		if (m != NULL && m->pindex != index)
			panic("%s: invalid key found", __func__);
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but the recursion is tightly controlled
 * because the maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_reclaim_allnodes_int(root);
	vm_radix_setroot(rtree, NULL);
}

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    (void *)vm_radix_node_page(rnode->rn_child[i]),
			    rnode->rn_clev);
}
#endif /* DDB */
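
/*
 * Example DDB usage (the address is hypothetical):
 *
 *	db> show radixnode 0xfffff80012345678
 *
 * prints the node's owner, child count and level, followed by one line
 * per populated slot.
 */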