/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_idr.c 334764 2018-06-07 07:44:54Z hselasky $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

/* Number of radix-tree levels needed to cover MAX_IDR_SHIFT id bits. */
#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
/* Per-CPU preload target: enough layers for two worst-case insertions. */
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

/*
 * Per-CPU cache of free idr_layer structures filled by idr_preload().
 * The free list is threaded through ary[0] of each cached layer.
 */
struct linux_idr_cache {
	spinlock_t lock;	/* protects head/count; held across a whole
				 * idr_preload()..idr_preload_end() section */
	struct idr_layer *head;	/* singly-linked list of cached layers */
	unsigned count;		/* number of layers currently cached */
};

static DPCPU_DEFINE(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the linux version
 * however it should be fairly fast. It is basically a radix tree with
 * a builtin bitmap for allocation.
 */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

/*
 * Pop one cached idr_layer from the per-CPU preload cache.
 *
 * Returns NULL unless the calling thread currently owns the cache's
 * spinlock, i.e. unless it sits inside an idr_preload() section; this
 * is how idr_get() distinguishes preloaded from non-preloaded callers.
 */
static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

/* Boot-time hook: initialize every CPU's preload-cache spinlock. */
static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

/* Shutdown hook: drain every CPU's cache and destroy its spinlock. */
static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

/*
 * Begin a preload section: pin the thread to its CPU, top the per-CPU
 * cache up to MAX_IDR_FREE layers and return with the cache spinlock
 * HELD.  The lock is deliberately left owned so that a subsequent
 * idr_get() may dequeue from the cache; it is released by the matching
 * idr_preload_end() call.
 */
void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		/* drop the lock while allocating; gfp_mask may sleep */
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

/* End a preload section: release the lock taken by idr_preload() and unpin. */
void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}

/* Largest id representable by the tree at its current depth. */
static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

/* Slot index of "id" within the radix-tree node at depth "layer". */
static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}

/* Initialize an idr to an empty tree guarded by its own mutex. */
void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees cached pages.
*/ 179219820Sjeffvoid 180219820Sjeffidr_destroy(struct idr *idr) 181219820Sjeff{ 182219820Sjeff struct idr_layer *il, *iln; 183219820Sjeff 184276749Shselasky idr_remove_all(idr); 185219820Sjeff mtx_lock(&idr->lock); 186219820Sjeff for (il = idr->free; il != NULL; il = iln) { 187219820Sjeff iln = il->ary[0]; 188219820Sjeff free(il, M_IDR); 189219820Sjeff } 190219820Sjeff mtx_unlock(&idr->lock); 191299421Shselasky mtx_destroy(&idr->lock); 192219820Sjeff} 193219820Sjeff 194219820Sjeffstatic void 195219820Sjeffidr_remove_layer(struct idr_layer *il, int layer) 196219820Sjeff{ 197219820Sjeff int i; 198219820Sjeff 199219820Sjeff if (il == NULL) 200219820Sjeff return; 201219820Sjeff if (layer == 0) { 202219820Sjeff free(il, M_IDR); 203219820Sjeff return; 204219820Sjeff } 205219820Sjeff for (i = 0; i < IDR_SIZE; i++) 206219820Sjeff if (il->ary[i]) 207219820Sjeff idr_remove_layer(il->ary[i], layer - 1); 208219820Sjeff} 209219820Sjeff 210219820Sjeffvoid 211219820Sjeffidr_remove_all(struct idr *idr) 212219820Sjeff{ 213219820Sjeff 214219820Sjeff mtx_lock(&idr->lock); 215219820Sjeff idr_remove_layer(idr->top, idr->layers - 1); 216219820Sjeff idr->top = NULL; 217219820Sjeff idr->layers = 0; 218219820Sjeff mtx_unlock(&idr->lock); 219219820Sjeff} 220219820Sjeff 221334764Shselaskystatic void * 222294505Shselaskyidr_remove_locked(struct idr *idr, int id) 223219820Sjeff{ 224219820Sjeff struct idr_layer *il; 225334764Shselasky void *res; 226219820Sjeff int layer; 227219820Sjeff int idx; 228219820Sjeff 229219820Sjeff id &= MAX_ID_MASK; 230219820Sjeff il = idr->top; 231219820Sjeff layer = idr->layers - 1; 232294505Shselasky if (il == NULL || id > idr_max(idr)) 233334764Shselasky return (NULL); 234219820Sjeff /* 235219820Sjeff * Walk down the tree to this item setting bitmaps along the way 236219820Sjeff * as we know at least one item will be free along this path. 
237219820Sjeff */ 238219820Sjeff while (layer && il) { 239219820Sjeff idx = idr_pos(id, layer); 240219820Sjeff il->bitmap |= 1 << idx; 241219820Sjeff il = il->ary[idx]; 242219820Sjeff layer--; 243219820Sjeff } 244219820Sjeff idx = id & IDR_MASK; 245219820Sjeff /* 246219820Sjeff * At this point we've set free space bitmaps up the whole tree. 247219820Sjeff * We could make this non-fatal and unwind but linux dumps a stack 248219820Sjeff * and a warning so I don't think it's necessary. 249219820Sjeff */ 250219820Sjeff if (il == NULL || (il->bitmap & (1 << idx)) != 0) 251219820Sjeff panic("idr_remove: Item %d not allocated (%p, %p)\n", 252219820Sjeff id, idr, il); 253334764Shselasky res = il->ary[idx]; 254219820Sjeff il->ary[idx] = NULL; 255219820Sjeff il->bitmap |= 1 << idx; 256334764Shselasky 257334764Shselasky return (res); 258294505Shselasky} 259294505Shselasky 260334764Shselaskyvoid * 261294505Shselaskyidr_remove(struct idr *idr, int id) 262294505Shselasky{ 263334764Shselasky void *res; 264334764Shselasky 265294505Shselasky mtx_lock(&idr->lock); 266334764Shselasky res = idr_remove_locked(idr, id); 267219820Sjeff mtx_unlock(&idr->lock); 268334764Shselasky 269334764Shselasky return (res); 270219820Sjeff} 271219820Sjeff 272299426Shselasky 273299426Shselaskystatic inline struct idr_layer * 274299426Shselaskyidr_find_layer_locked(struct idr *idr, int id) 275219820Sjeff{ 276219820Sjeff struct idr_layer *il; 277219820Sjeff int layer; 278219820Sjeff 279219820Sjeff id &= MAX_ID_MASK; 280219820Sjeff il = idr->top; 281219820Sjeff layer = idr->layers - 1; 282219820Sjeff if (il == NULL || id > idr_max(idr)) 283299426Shselasky return (NULL); 284219820Sjeff while (layer && il) { 285219820Sjeff il = il->ary[idr_pos(id, layer)]; 286219820Sjeff layer--; 287219820Sjeff } 288299426Shselasky return (il); 289299426Shselasky} 290299426Shselasky 291299426Shselaskyvoid * 292299426Shselaskyidr_replace(struct idr *idr, void *ptr, int id) 293299426Shselasky{ 294299426Shselasky struct 
idr_layer *il; 295299426Shselasky void *res; 296299426Shselasky int idx; 297299426Shselasky 298299426Shselasky mtx_lock(&idr->lock); 299299426Shselasky il = idr_find_layer_locked(idr, id); 300219820Sjeff idx = id & IDR_MASK; 301299426Shselasky 302299426Shselasky /* Replace still returns an error if the item was not allocated. */ 303299426Shselasky if (il == NULL || (il->bitmap & (1 << idx))) { 304299426Shselasky res = ERR_PTR(-ENOENT); 305299426Shselasky } else { 306219820Sjeff res = il->ary[idx]; 307219820Sjeff il->ary[idx] = ptr; 308219820Sjeff } 309219820Sjeff mtx_unlock(&idr->lock); 310219820Sjeff return (res); 311219820Sjeff} 312219820Sjeff 313282331Smarkjstatic inline void * 314282331Smarkjidr_find_locked(struct idr *idr, int id) 315219820Sjeff{ 316219820Sjeff struct idr_layer *il; 317219820Sjeff void *res; 318219820Sjeff 319282331Smarkj mtx_assert(&idr->lock, MA_OWNED); 320299426Shselasky il = idr_find_layer_locked(idr, id); 321219820Sjeff if (il != NULL) 322219820Sjeff res = il->ary[id & IDR_MASK]; 323299426Shselasky else 324299426Shselasky res = NULL; 325282331Smarkj return (res); 326282331Smarkj} 327282331Smarkj 328282331Smarkjvoid * 329282331Smarkjidr_find(struct idr *idr, int id) 330282331Smarkj{ 331282331Smarkj void *res; 332282331Smarkj 333282331Smarkj mtx_lock(&idr->lock); 334282331Smarkj res = idr_find_locked(idr, id); 335219820Sjeff mtx_unlock(&idr->lock); 336219820Sjeff return (res); 337219820Sjeff} 338219820Sjeff 339299427Shselaskyvoid * 340299427Shselaskyidr_get_next(struct idr *idr, int *nextidp) 341299427Shselasky{ 342299427Shselasky void *res = NULL; 343299427Shselasky int id = *nextidp; 344299427Shselasky 345299427Shselasky mtx_lock(&idr->lock); 346299427Shselasky for (; id <= idr_max(idr); id++) { 347299427Shselasky res = idr_find_locked(idr, id); 348299427Shselasky if (res == NULL) 349299427Shselasky continue; 350299427Shselasky *nextidp = id; 351299427Shselasky break; 352299427Shselasky } 353299427Shselasky mtx_unlock(&idr->lock); 
354299427Shselasky return (res); 355299427Shselasky} 356299427Shselasky 357219820Sjeffint 358219820Sjeffidr_pre_get(struct idr *idr, gfp_t gfp_mask) 359219820Sjeff{ 360219820Sjeff struct idr_layer *il, *iln; 361219820Sjeff struct idr_layer *head; 362219820Sjeff int need; 363219820Sjeff 364219820Sjeff mtx_lock(&idr->lock); 365219820Sjeff for (;;) { 366219820Sjeff need = idr->layers + 1; 367219820Sjeff for (il = idr->free; il != NULL; il = il->ary[0]) 368219820Sjeff need--; 369219820Sjeff mtx_unlock(&idr->lock); 370278117Snp if (need <= 0) 371219820Sjeff break; 372219820Sjeff for (head = NULL; need; need--) { 373219820Sjeff iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask); 374219820Sjeff if (iln == NULL) 375219820Sjeff break; 376219820Sjeff bitmap_fill(&iln->bitmap, IDR_SIZE); 377219820Sjeff if (head != NULL) { 378219820Sjeff il->ary[0] = iln; 379219820Sjeff il = iln; 380219820Sjeff } else 381219820Sjeff head = il = iln; 382219820Sjeff } 383219820Sjeff if (head == NULL) 384219820Sjeff return (0); 385219820Sjeff mtx_lock(&idr->lock); 386219820Sjeff il->ary[0] = idr->free; 387219820Sjeff idr->free = head; 388219820Sjeff } 389219820Sjeff return (1); 390219820Sjeff} 391219820Sjeff 392328653Shselaskystatic struct idr_layer * 393328653Shselaskyidr_free_list_get(struct idr *idp) 394219820Sjeff{ 395219820Sjeff struct idr_layer *il; 396219820Sjeff 397328653Shselasky if ((il = idp->free) != NULL) { 398328653Shselasky idp->free = il->ary[0]; 399219820Sjeff il->ary[0] = NULL; 400219820Sjeff } 401328653Shselasky return (il); 402328653Shselasky} 403328653Shselasky 404328653Shselaskystatic inline struct idr_layer * 405328653Shselaskyidr_get(struct idr *idp) 406328653Shselasky{ 407328653Shselasky struct idr_layer *il; 408328653Shselasky 409328653Shselasky if ((il = idr_free_list_get(idp)) != NULL) { 410328653Shselasky MPASS(il->bitmap != 0); 411328653Shselasky } else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) { 412301877Smarkj bitmap_fill(&il->bitmap, 
IDR_SIZE); 413328653Shselasky } else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) { 414328653Shselasky bitmap_fill(&il->bitmap, IDR_SIZE); 415328653Shselasky } else { 416328653Shselasky return (NULL); 417328653Shselasky } 418219820Sjeff return (il); 419219820Sjeff} 420219820Sjeff 421219820Sjeff/* 422219820Sjeff * Could be implemented as get_new_above(idr, ptr, 0, idp) but written 423219820Sjeff * first for simplicity sake. 424219820Sjeff */ 425294505Shselaskystatic int 426294505Shselaskyidr_get_new_locked(struct idr *idr, void *ptr, int *idp) 427219820Sjeff{ 428219820Sjeff struct idr_layer *stack[MAX_LEVEL]; 429219820Sjeff struct idr_layer *il; 430219820Sjeff int error; 431219820Sjeff int layer; 432219820Sjeff int idx; 433219820Sjeff int id; 434219820Sjeff 435294505Shselasky mtx_assert(&idr->lock, MA_OWNED); 436294505Shselasky 437219820Sjeff error = -EAGAIN; 438219820Sjeff /* 439219820Sjeff * Expand the tree until there is free space. 440219820Sjeff */ 441219820Sjeff if (idr->top == NULL || idr->top->bitmap == 0) { 442219820Sjeff if (idr->layers == MAX_LEVEL + 1) { 443219820Sjeff error = -ENOSPC; 444219820Sjeff goto out; 445219820Sjeff } 446219820Sjeff il = idr_get(idr); 447219820Sjeff if (il == NULL) 448219820Sjeff goto out; 449219820Sjeff il->ary[0] = idr->top; 450219820Sjeff if (idr->top) 451219820Sjeff il->bitmap &= ~1; 452219820Sjeff idr->top = il; 453219820Sjeff idr->layers++; 454219820Sjeff } 455219820Sjeff il = idr->top; 456219820Sjeff id = 0; 457219820Sjeff /* 458219820Sjeff * Walk the tree following free bitmaps, record our path. 
459219820Sjeff */ 460219820Sjeff for (layer = idr->layers - 1;; layer--) { 461219820Sjeff stack[layer] = il; 462219820Sjeff idx = ffsl(il->bitmap); 463219820Sjeff if (idx == 0) 464219820Sjeff panic("idr_get_new: Invalid leaf state (%p, %p)\n", 465219820Sjeff idr, il); 466219820Sjeff idx--; 467219820Sjeff id |= idx << (layer * IDR_BITS); 468219820Sjeff if (layer == 0) 469219820Sjeff break; 470219820Sjeff if (il->ary[idx] == NULL) { 471219820Sjeff il->ary[idx] = idr_get(idr); 472219820Sjeff if (il->ary[idx] == NULL) 473219820Sjeff goto out; 474219820Sjeff } 475219820Sjeff il = il->ary[idx]; 476219820Sjeff } 477219820Sjeff /* 478219820Sjeff * Allocate the leaf to the consumer. 479219820Sjeff */ 480219820Sjeff il->bitmap &= ~(1 << idx); 481219820Sjeff il->ary[idx] = ptr; 482219820Sjeff *idp = id; 483219820Sjeff /* 484219820Sjeff * Clear bitmaps potentially up to the root. 485219820Sjeff */ 486219820Sjeff while (il->bitmap == 0 && ++layer < idr->layers) { 487219820Sjeff il = stack[layer]; 488219820Sjeff il->bitmap &= ~(1 << idr_pos(id, layer)); 489219820Sjeff } 490219820Sjeff error = 0; 491219820Sjeffout: 492219820Sjeff#ifdef INVARIANTS 493282331Smarkj if (error == 0 && idr_find_locked(idr, id) != ptr) { 494219820Sjeff panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n", 495219820Sjeff idr, id, ptr); 496219820Sjeff } 497219820Sjeff#endif 498219820Sjeff return (error); 499219820Sjeff} 500219820Sjeff 501219820Sjeffint 502294505Shselaskyidr_get_new(struct idr *idr, void *ptr, int *idp) 503219820Sjeff{ 504294505Shselasky int retval; 505294505Shselasky 506294505Shselasky mtx_lock(&idr->lock); 507294505Shselasky retval = idr_get_new_locked(idr, ptr, idp); 508294505Shselasky mtx_unlock(&idr->lock); 509294505Shselasky return (retval); 510294505Shselasky} 511294505Shselasky 512294505Shselaskystatic int 513294505Shselaskyidr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp) 514294505Shselasky{ 515219820Sjeff struct idr_layer *stack[MAX_LEVEL]; 
516219820Sjeff struct idr_layer *il; 517219820Sjeff int error; 518219820Sjeff int layer; 519219820Sjeff int idx, sidx; 520219820Sjeff int id; 521219820Sjeff 522294505Shselasky mtx_assert(&idr->lock, MA_OWNED); 523294505Shselasky 524219820Sjeff error = -EAGAIN; 525219820Sjeff /* 526219820Sjeff * Compute the layers required to support starting_id and the mask 527219820Sjeff * at the top layer. 528219820Sjeff */ 529219820Sjeffrestart: 530219820Sjeff idx = starting_id; 531219820Sjeff layer = 0; 532219820Sjeff while (idx & ~IDR_MASK) { 533219820Sjeff layer++; 534219820Sjeff idx >>= IDR_BITS; 535219820Sjeff } 536219820Sjeff if (layer == MAX_LEVEL + 1) { 537219820Sjeff error = -ENOSPC; 538219820Sjeff goto out; 539219820Sjeff } 540219820Sjeff /* 541219820Sjeff * Expand the tree until there is free space at or beyond starting_id. 542219820Sjeff */ 543219820Sjeff while (idr->layers <= layer || 544219820Sjeff idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) { 545219820Sjeff if (idr->layers == MAX_LEVEL + 1) { 546219820Sjeff error = -ENOSPC; 547219820Sjeff goto out; 548219820Sjeff } 549219820Sjeff il = idr_get(idr); 550219820Sjeff if (il == NULL) 551219820Sjeff goto out; 552219820Sjeff il->ary[0] = idr->top; 553219820Sjeff if (idr->top && idr->top->bitmap == 0) 554219820Sjeff il->bitmap &= ~1; 555219820Sjeff idr->top = il; 556219820Sjeff idr->layers++; 557219820Sjeff } 558219820Sjeff il = idr->top; 559219820Sjeff id = 0; 560219820Sjeff /* 561219820Sjeff * Walk the tree following free bitmaps, record our path. 562219820Sjeff */ 563219820Sjeff for (layer = idr->layers - 1;; layer--) { 564219820Sjeff stack[layer] = il; 565219820Sjeff sidx = idr_pos(starting_id, layer); 566219820Sjeff /* Returns index numbered from 0 or size if none exists. 
*/ 567219820Sjeff idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx); 568219820Sjeff if (idx == IDR_SIZE && sidx == 0) 569219820Sjeff panic("idr_get_new: Invalid leaf state (%p, %p)\n", 570219820Sjeff idr, il); 571219820Sjeff /* 572219820Sjeff * We may have walked a path where there was a free bit but 573219820Sjeff * it was lower than what we wanted. Restart the search with 574219820Sjeff * a larger starting id. id contains the progress we made so 575219820Sjeff * far. Search the leaf one above this level. This may 576219820Sjeff * restart as many as MAX_LEVEL times but that is expected 577219820Sjeff * to be rare. 578219820Sjeff */ 579219820Sjeff if (idx == IDR_SIZE) { 580277229Snp starting_id = id + (1 << ((layer + 1) * IDR_BITS)); 581219820Sjeff goto restart; 582219820Sjeff } 583219820Sjeff if (idx > sidx) 584219820Sjeff starting_id = 0; /* Search the whole subtree. */ 585219820Sjeff id |= idx << (layer * IDR_BITS); 586219820Sjeff if (layer == 0) 587219820Sjeff break; 588219820Sjeff if (il->ary[idx] == NULL) { 589219820Sjeff il->ary[idx] = idr_get(idr); 590219820Sjeff if (il->ary[idx] == NULL) 591219820Sjeff goto out; 592219820Sjeff } 593219820Sjeff il = il->ary[idx]; 594219820Sjeff } 595219820Sjeff /* 596219820Sjeff * Allocate the leaf to the consumer. 597219820Sjeff */ 598219820Sjeff il->bitmap &= ~(1 << idx); 599219820Sjeff il->ary[idx] = ptr; 600219820Sjeff *idp = id; 601219820Sjeff /* 602219820Sjeff * Clear bitmaps potentially up to the root. 
603219820Sjeff */ 604219820Sjeff while (il->bitmap == 0 && ++layer < idr->layers) { 605219820Sjeff il = stack[layer]; 606219820Sjeff il->bitmap &= ~(1 << idr_pos(id, layer)); 607219820Sjeff } 608219820Sjeff error = 0; 609219820Sjeffout: 610219820Sjeff#ifdef INVARIANTS 611282331Smarkj if (error == 0 && idr_find_locked(idr, id) != ptr) { 612219820Sjeff panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n", 613219820Sjeff idr, id, ptr); 614219820Sjeff } 615219820Sjeff#endif 616219820Sjeff return (error); 617219820Sjeff} 618294505Shselasky 619294505Shselaskyint 620294505Shselaskyidr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp) 621294505Shselasky{ 622294505Shselasky int retval; 623294505Shselasky 624294505Shselasky mtx_lock(&idr->lock); 625294505Shselasky retval = idr_get_new_above_locked(idr, ptr, starting_id, idp); 626294505Shselasky mtx_unlock(&idr->lock); 627294505Shselasky return (retval); 628294505Shselasky} 629294505Shselasky 630299427Shselaskyint 631299427Shselaskyida_get_new_above(struct ida *ida, int starting_id, int *p_id) 632299427Shselasky{ 633299427Shselasky return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id)); 634299427Shselasky} 635299427Shselasky 636294505Shselaskystatic int 637294505Shselaskyidr_alloc_locked(struct idr *idr, void *ptr, int start, int end) 638294505Shselasky{ 639294505Shselasky int max = end > 0 ? 
end - 1 : INT_MAX; 640294505Shselasky int error; 641294505Shselasky int id; 642294505Shselasky 643294505Shselasky mtx_assert(&idr->lock, MA_OWNED); 644294505Shselasky 645294505Shselasky if (unlikely(start < 0)) 646294505Shselasky return (-EINVAL); 647294505Shselasky if (unlikely(max < start)) 648294505Shselasky return (-ENOSPC); 649294505Shselasky 650294505Shselasky if (start == 0) 651294505Shselasky error = idr_get_new_locked(idr, ptr, &id); 652294505Shselasky else 653294505Shselasky error = idr_get_new_above_locked(idr, ptr, start, &id); 654294505Shselasky 655294505Shselasky if (unlikely(error < 0)) 656294505Shselasky return (error); 657294505Shselasky if (unlikely(id > max)) { 658294505Shselasky idr_remove_locked(idr, id); 659294505Shselasky return (-ENOSPC); 660294505Shselasky } 661294505Shselasky return (id); 662294505Shselasky} 663294505Shselasky 664294505Shselaskyint 665294505Shselaskyidr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) 666294505Shselasky{ 667294505Shselasky int retval; 668294505Shselasky 669294505Shselasky mtx_lock(&idr->lock); 670294505Shselasky retval = idr_alloc_locked(idr, ptr, start, end); 671294505Shselasky mtx_unlock(&idr->lock); 672294505Shselasky return (retval); 673294505Shselasky} 674294505Shselasky 675294505Shselaskyint 676294505Shselaskyidr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) 677294505Shselasky{ 678294505Shselasky int retval; 679294505Shselasky 680294505Shselasky mtx_lock(&idr->lock); 681294505Shselasky retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end); 682294505Shselasky if (unlikely(retval == -ENOSPC)) 683294505Shselasky retval = idr_alloc_locked(idr, ptr, start, end); 684294505Shselasky if (likely(retval >= 0)) 685294505Shselasky idr->next_cyclic_id = retval + 1; 686294505Shselasky mtx_unlock(&idr->lock); 687294505Shselasky return (retval); 688294505Shselasky} 689299427Shselasky 690299427Shselaskystatic int 
691328654Shselaskyidr_for_each_layer(struct idr_layer *il, int offset, int layer, 692299427Shselasky int (*f)(int id, void *p, void *data), void *data) 693299427Shselasky{ 694299427Shselasky int i, err; 695299427Shselasky 696299427Shselasky if (il == NULL) 697299427Shselasky return (0); 698299427Shselasky if (layer == 0) { 699299427Shselasky for (i = 0; i < IDR_SIZE; i++) { 700299427Shselasky if (il->ary[i] == NULL) 701299427Shselasky continue; 702328654Shselasky err = f(i + offset, il->ary[i], data); 703299427Shselasky if (err) 704299427Shselasky return (err); 705299427Shselasky } 706299427Shselasky return (0); 707299427Shselasky } 708299427Shselasky for (i = 0; i < IDR_SIZE; i++) { 709299427Shselasky if (il->ary[i] == NULL) 710299427Shselasky continue; 711328654Shselasky err = idr_for_each_layer(il->ary[i], 712328654Shselasky (i + offset) * IDR_SIZE, layer - 1, f, data); 713299427Shselasky if (err) 714299427Shselasky return (err); 715299427Shselasky } 716299427Shselasky return (0); 717299427Shselasky} 718299427Shselasky 719299469Shselasky/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */ 720299427Shselaskyint 721299427Shselaskyidr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data) 722299427Shselasky{ 723328654Shselasky return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data)); 724299427Shselasky} 725299427Shselasky 726334764Shselaskystatic int 727334764Shselaskyidr_has_entry(int id, void *p, void *data) 728334764Shselasky{ 729334764Shselasky 730334764Shselasky return (1); 731334764Shselasky} 732334764Shselasky 733334764Shselaskybool 734334764Shselaskyidr_is_empty(struct idr *idp) 735334764Shselasky{ 736334764Shselasky 737334764Shselasky return (idr_for_each(idp, idr_has_entry, NULL) == 0); 738334764Shselasky} 739334764Shselasky 740299427Shselaskyint 741299427Shselaskyida_pre_get(struct ida *ida, gfp_t flags) 742299427Shselasky{ 743299427Shselasky if (idr_pre_get(&ida->idr, flags) == 0) 
744299427Shselasky return (0); 745299427Shselasky 746299427Shselasky if (ida->free_bitmap == NULL) { 747299427Shselasky ida->free_bitmap = 748299427Shselasky malloc(sizeof(struct ida_bitmap), M_IDR, flags); 749299427Shselasky } 750299427Shselasky return (ida->free_bitmap != NULL); 751299427Shselasky} 752299427Shselasky 753299427Shselaskyint 754299427Shselaskyida_simple_get(struct ida *ida, unsigned int start, unsigned int end, 755299427Shselasky gfp_t flags) 756299427Shselasky{ 757299427Shselasky int ret, id; 758299427Shselasky unsigned int max; 759299427Shselasky 760299427Shselasky MPASS((int)start >= 0); 761299427Shselasky MPASS((int)end >= 0); 762299427Shselasky 763299427Shselasky if (end == 0) 764299427Shselasky max = 0x80000000; 765299427Shselasky else { 766299427Shselasky MPASS(end > start); 767299427Shselasky max = end - 1; 768299427Shselasky } 769299427Shselaskyagain: 770299427Shselasky if (!ida_pre_get(ida, flags)) 771299427Shselasky return (-ENOMEM); 772299427Shselasky 773299427Shselasky if ((ret = ida_get_new_above(ida, start, &id)) == 0) { 774299427Shselasky if (id > max) { 775299427Shselasky ida_remove(ida, id); 776299427Shselasky ret = -ENOSPC; 777299427Shselasky } else { 778299427Shselasky ret = id; 779299427Shselasky } 780299427Shselasky } 781299427Shselasky if (__predict_false(ret == -EAGAIN)) 782299427Shselasky goto again; 783299427Shselasky 784299427Shselasky return (ret); 785299427Shselasky} 786299427Shselasky 787299427Shselaskyvoid 788299427Shselaskyida_simple_remove(struct ida *ida, unsigned int id) 789299427Shselasky{ 790299427Shselasky idr_remove(&ida->idr, id); 791299427Shselasky} 792299427Shselasky 793299427Shselaskyvoid 794299427Shselaskyida_remove(struct ida *ida, int id) 795331756Semaste{ 796299427Shselasky idr_remove(&ida->idr, id); 797299427Shselasky} 798299427Shselasky 799299427Shselaskyvoid 800299427Shselaskyida_init(struct ida *ida) 801299427Shselasky{ 802299427Shselasky idr_init(&ida->idr); 803299427Shselasky} 804299427Shselasky 
805299427Shselaskyvoid 806299427Shselaskyida_destroy(struct ida *ida) 807299427Shselasky{ 808299427Shselasky idr_destroy(&ida->idr); 809299427Shselasky free(ida->free_bitmap, M_IDR); 810299427Shselasky} 811