/* sl_malloc.c - malloc routines using a per-thread slab */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2003-2011 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#include "portable.h"

#include <stdio.h>
#include <ac/string.h>

#include "slap.h"

#ifdef USE_VALGRIND
/* Get debugging help from Valgrind */
#include <valgrind/memcheck.h>
#define VGMEMP_MARK(m,s)	VALGRIND_MAKE_MEM_NOACCESS(m,s)
#define VGMEMP_CREATE(h,r,z)	VALGRIND_CREATE_MEMPOOL(h,r,z)
#define VGMEMP_TRIM(h,a,s)	VALGRIND_MEMPOOL_TRIM(h,a,s)
#define VGMEMP_ALLOC(h,a,s)	VALGRIND_MEMPOOL_ALLOC(h,a,s)
#define VGMEMP_CHANGE(h,a,b,s)	VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
#else
/* No-op stubs when not building with Valgrind support */
#define VGMEMP_MARK(m,s)
#define VGMEMP_CREATE(h,r,z)
#define VGMEMP_TRIM(h,a,s)
#define VGMEMP_ALLOC(h,a,s)
#define VGMEMP_CHANGE(h,a,b,s)
#endif

/*
 * This allocator returns temporary memory from a slab in a given memory
 * context, aligned on a 2-int boundary. It cannot be used for data
 * which will outlive the task allocating it.
 *
 * A new memory context attaches to the creator's thread context, if any.
 * Threads cannot use other threads' memory contexts; there are no locks.
 *
 * The caller of slap_sl_malloc, usually a thread pool task, must
 * slap_sl_free the memory before finishing: New tasks reuse the context
 * and normally reset it, reclaiming memory left over from last task.
 *
 * The allocator helps memory fragmentation, speed and memory leaks.
 * It is not (yet) reliable as a garbage collector:
 *
 * It falls back to context NULL - plain ber_memalloc() - when the
 * context's slab is full. A reset does not reclaim such memory.
 * Conversely, free/realloc of data not from the given context assumes
 * context NULL. The data must not belong to another memory context.
 *
 * Code which has lost track of the current memory context can try
 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
 *
 * Allocations cannot yet return failure. Like ch_malloc, they succeed
 * or abort slapd. This will change, do fix code which assumes success.
 */

/*
 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
 * allocated blocks' head - and in freed blocks also at the tail, marked
 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
 * from the last block forward. This is fast, but when a block is never
 * freed, older blocks will not be reclaimed until the slab is reset...
 */

#ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
enum { No_sl_malloc = 1 };
#else
enum { No_sl_malloc = 0 };
#endif

/* Number of slab_object bookkeeping nodes ch_malloc'ed per batch */
#define SLAP_SLAB_SOBLOCK 64

/* Bookkeeping node for the buddy-system (non-stack) allocator */
struct slab_object {
	void *so_ptr;		/* slab address this node describes */
	int so_blockhead;	/* 1 = first node of a batch: freeing it frees the batch */
	LDAP_LIST_ENTRY(slab_object) so_link;
};

/* A memory context: one slab plus its allocator state */
struct slab_heap {
	void *sh_base;		/* start of the slab */
	void *sh_last;		/* stack mode: first unused address in the slab */
	void *sh_end;		/* first address past the slab */
	int sh_stack;		/* nonzero = stack allocator, zero = buddy system */
	int sh_maxorder;	/* buddy mode: order (log2-ish size) of the whole slab */
	unsigned char **sh_map;	/* buddy mode: per-order allocation bitmaps */
	LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;	/* buddy mode: per-order free lists */
	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;	/* pool of spare slab_object nodes */
};

enum {
	/* Alignment of returned blocks: big enough for a ber_len_t head
	 * and the documented 2-int boundary */
	Align = sizeof(ber_len_t) > 2*sizeof(int)
		? sizeof(ber_len_t) : 2*sizeof(int),
	Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
	order_start = Align_log2 - 1,
	pad = Align - 1
};

static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
#ifdef SLAPD_UNUSED
static void print_slheap(int level, void *ctx);
#endif

/* Keep memory context in a thread-local var, or in a global when no threads */
#ifdef NO_THREADS
static struct slab_heap *slheap;
# define SET_MEMCTX(thrctx, memctx, sfree)	((void) (slheap = (memctx)))
# define GET_MEMCTX(thrctx, memctxp)		(*(memctxp) = slheap)
#else
/* The function pointer slap_sl_mem_init doubles as a unique key value */
# define memctx_key ((void *) slap_sl_mem_init)
# define SET_MEMCTX(thrctx, memctx, kfree) \
	ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
# define GET_MEMCTX(thrctx, memctxp) \
	((void) (*(memctxp) = NULL), \
	 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
	 *(memctxp))
#endif /* NO_THREADS */


/* Destroy the context, or if key==NULL clean it up for reuse.
 */
void
slap_sl_mem_destroy(
	void *key,
	void *data
)
{
	struct slab_heap *sh = data;
	struct slab_object *so;
	int i;

	if (!sh->sh_stack) {
		/* Buddy mode: return every free-list node to the sopool and
		 * release the per-order bitmaps, then the arrays themselves. */
		for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
			so = LDAP_LIST_FIRST(&sh->sh_free[i]);
			while (so) {
				struct slab_object *so_tmp = so;
				so = LDAP_LIST_NEXT(so, so_link);
				LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
			}
			ch_free(sh->sh_map[i]);
		}
		ch_free(sh->sh_free);
		ch_free(sh->sh_map);

		/* First pass: unlink the non-blockhead nodes, which live
		 * inside some batch and must not be freed individually. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			if (!so_tmp->so_blockhead) {
				LDAP_LIST_REMOVE(so_tmp, so_link);
			}
		}
		/* Second pass: only blockheads remain; each one is the
		 * ch_malloc'ed batch pointer, so free them. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			ch_free(so_tmp);
		}
	}

	if (key != NULL) {
		/* Full destroy (thread key teardown): release slab and context */
		ber_memfree_x(sh->sh_base, NULL);
		ber_memfree_x(sh, NULL);
	}
}

BerMemoryFunctions slap_sl_mfuncs =
	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };

/* Install the slab allocator as liblber's memory allocator */
void
slap_sl_mem_init()
{
	assert( Align == 1 << Align_log2 );

	ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
}

/* Create, reset or just return the memory context of the current thread.
 */
void *
slap_sl_mem_create(
	ber_len_t size,
	int stack,
	void *thrctx,
	int new
)
{
	void *memctx;
	struct slab_heap *sh;
	ber_len_t size_shift;
	struct slab_object *so;
	char *base, *newptr;
	/* Offset from slab base so that (base + block head) is Align-ed */
	enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };

	sh = GET_MEMCTX(thrctx, &memctx);
	if ( sh && !new )
		return sh;

	/* Round up to doubleword boundary, then make room for initial
	 * padding, preserving expected available size for pool version */
	size = ((size + Align-1) & -Align) + Base_offset;

	if (!sh) {
		/* No context yet: allocate one and attach it to the thread */
		sh = ch_malloc(sizeof(struct slab_heap));
		base = ch_malloc(size);
		SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
		VGMEMP_MARK(base, size);
		VGMEMP_CREATE(sh, 0, 0);
	} else {
		/* Reset the existing context, growing the slab if needed */
		slap_sl_mem_destroy(NULL, sh);
		base = sh->sh_base;
		if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
			newptr = ch_realloc(base, size);
			if ( newptr == NULL ) return NULL;
			VGMEMP_CHANGE(sh, base, newptr, size);
			base = newptr;
		}
		VGMEMP_TRIM(sh, sh->sh_base, 0);
	}
	sh->sh_base = base;
	sh->sh_end = base + size;

	/* Align (base + head of first block) == first returned block */
	base += Base_offset;
	size -= Base_offset;

	sh->sh_stack = stack;
	if (stack) {
		sh->sh_last = base;

	} else {
		int i, order = -1, order_end = -1;

		/* order_end = ceil(log2(size)); order = number of order levels */
		size_shift = size - 1;
		do {
			order_end++;
		} while (size_shift >>= 1);
		order = order_end - order_start + 1;
		sh->sh_maxorder = order_end;

		/* One free list per order level */
		sh->sh_free = (struct sh_freelist *)
			ch_malloc(order * sizeof(struct sh_freelist));
		for (i = 0; i < order; i++) {
			LDAP_LIST_INIT(&sh->sh_free[i]);
		}

		LDAP_LIST_INIT(&sh->sh_sopool);

		if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
			slap_replenish_sopool(sh);
		}
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		LDAP_LIST_REMOVE(so, so_link);
		so->so_ptr = base;

		/* Initially the whole slab is one free block of maximum order */
		LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);

		/* One allocation bitmap per order; at least one byte each */
		sh->sh_map = (unsigned char **)
			ch_malloc(order * sizeof(unsigned char *));
		for (i = 0; i < order; i++) {
			int shiftamt = order_start + 1 + i;
			int nummaps = size >> shiftamt;
			assert(nummaps);
			nummaps >>= 3;
			if (!nummaps) nummaps = 1;
			sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
			memset(sh->sh_map[i], 0, nummaps);
		}
	}

	return sh;
}

/*
 * Separate memory context from thread context. Future users must
 * know the context, since ch_free/slap_sl_context() cannot find it.
 */
void
slap_sl_mem_detach(
	void *thrctx,
	void *memctx
)
{
	SET_MEMCTX(thrctx, NULL, 0);
}

/* Allocate from the given context, or via ber_memalloc_x(...,NULL)
 * when ctx is NULL or sl_malloc is disabled. Aborts on failure. */
void *
slap_sl_malloc(
	ber_len_t size,
	void *ctx
)
{
	struct slab_heap *sh = ctx;
	ber_len_t *ptr, *newptr;

	/* ber_set_option calls us like this */
	if (No_sl_malloc || !ctx) {
		newptr = ber_memalloc_x( size, NULL );
		if ( newptr ) return newptr;
		Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
			(unsigned long) size, 0, 0);
		assert( 0 );
		exit( EXIT_FAILURE );
	}

	/* Add room for head, ensure room for tail when freed, and
	 * round up to doubleword boundary.
	 */
	size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;

	if (sh->sh_stack) {
		if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
			/* Bump allocation: head word = size of head+block */
			newptr = sh->sh_last;
			sh->sh_last = (char *) sh->sh_last + size;
			VGMEMP_ALLOC(sh, newptr, size);
			*newptr++ = size;
			return( (void *)newptr );
		}

		/* Slab full: drop the head reservation and fall through
		 * to the ch_malloc fallback below */
		size -= sizeof(ber_len_t);

	} else {
		struct slab_object *so_new, *so_left, *so_right;
		ber_len_t size_shift;
		unsigned long diff;
		int i, j, order = -1;

		/* order = ceil(log2(size)) including the head word */
		size_shift = size - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		size -= sizeof(ber_len_t);

		/* Find the smallest order with a free block available */
		for (i = order; i <= sh->sh_maxorder &&
			LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);

		if (i == order) {
			/* Exact-fit free block: mark allocated in this order's bitmap */
			so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
			LDAP_LIST_REMOVE(so_new, so_link);
			ptr = so_new->so_ptr;
			diff = (unsigned long)((char*)ptr -
				(char*)sh->sh_base) >> (order + 1);
			sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
			*ptr++ = size;
			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
			return((void*)ptr);
		} else if (i <= sh->sh_maxorder) {
			/* Split a larger free block in half repeatedly until a
			 * block of the wanted order is carved out */
			for (j = i; j > order; j--) {
				so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
				LDAP_LIST_REMOVE(so_left, so_link);
				if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
					slap_replenish_sopool(sh);
				}
				so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
				LDAP_LIST_REMOVE(so_right, so_link);
				so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
				if (j == order + 1) {
					/* Final split: use the left half, free the right */
					ptr = so_left->so_ptr;
					diff = (unsigned long)((char*)ptr -
						(char*)sh->sh_base) >> (order+1);
					sh->sh_map[order-order_start][diff>>3] |=
						(1 << (diff & 0x7));
					*ptr++ = size;
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
					return((void*)ptr);
				} else {
					/* Both halves become free blocks one order down */
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_left, so_link);
				}
			}
		}
		/* FIXME: missing return; guessing we failed... */
	}

	/* No room in the slab: fall back to the heap (see file header -
	 * such memory is not reclaimed by a context reset) */
	Debug(LDAP_DEBUG_TRACE,
		"sl_malloc %lu: ch_malloc\n",
		(unsigned long) size, 0, 0);
	return ch_malloc(size);
}

#define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
	((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
	 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)

/* calloc with n*size overflow check; aborts when out of range */
void *
slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
{
	void *newptr;
	ber_len_t total = n * size;

	/* The sqrt test is a slight optimization: often avoids the division */
	if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
		newptr = slap_sl_malloc( total, ctx );
		memset( newptr, 0, n*size );
	} else {
		Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
			(unsigned long) n, (unsigned long) size, 0);
		assert(0);
		exit(EXIT_FAILURE);
	}
	return newptr;
}

void *
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
	void *newptr;

	if (ptr == NULL)
		return slap_sl_malloc(size, ctx);

	/* Not our memory?
	 */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		/* Like ch_realloc(), except not trying a new context */
		newptr = ber_memrealloc_x(ptr, size, NULL);
		if (newptr) {
			return newptr;
		}
		Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
			(unsigned long) size, 0, 0);
		assert(0);
		exit( EXIT_FAILURE );
	}

	if (size == 0) {
		slap_sl_free(ptr, ctx);
		return NULL;
	}

	/* Head word: size of head+block; in stack mode its low bit may be
	 * set, flagging that the *preceding* block has been freed */
	oldsize = p[-1];

	if (sh->sh_stack) {
		/* Add room for head, round up to doubleword boundary */
		size = (size + sizeof(ber_len_t) + Align-1) & -Align;

		p--;

		/* Never shrink blocks */
		if (size <= oldsize) {
			return ptr;
		}

		oldsize &= -2;	/* clear the prev-block-freed flag bit */
		nextp = (ber_len_t *) ((char *) p + oldsize);

		/* If reallocing the last block, try to grow it */
		if (nextp == sh->sh_last) {
			if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
				sh->sh_last = (char *) p + size;
				p[0] = (p[0] & 1) | size;
				return ptr;
			}

			/* Nowhere to grow, need to alloc and copy */
		} else {
			/* Slight optimization of the final realloc variant */
			newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
			AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
			/* Not last block, can just mark old region as free:
			 * tail = size, low bit of next block's head = 1 */
			nextp[-1] = oldsize;
			nextp[0] |= 1;
			return newptr;
		}

		size -= sizeof(ber_len_t);
		oldsize -= sizeof(ber_len_t);

	} else if (oldsize > size) {
		oldsize = size;
	}

	/* Generic path: allocate, copy the surviving bytes, free the old block */
	newptr = slap_sl_malloc(size, ctx);
	AC_MEMCPY(newptr, ptr, oldsize);
	slap_sl_free(ptr, ctx);
	return newptr;
}

void
slap_sl_free(void *ptr, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t size;
	ber_len_t *p = ptr, *nextp, *tmpp;

	if (!ptr)
		return;

	/* Not from this slab: assume plain ber_memalloc'ed memory */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		ber_memfree_x(ptr, NULL);
		return;
	}

	size = *(--p);
508 if (sh->sh_stack) { 509 size &= -2; 510 nextp = (ber_len_t *) ((char *) p + size); 511 if (sh->sh_last != nextp) { 512 /* Mark it free: tail = size, head of next block |= 1 */ 513 nextp[-1] = size; 514 nextp[0] |= 1; 515 /* We can't tell Valgrind about it yet, because we 516 * still need read/write access to this block for 517 * when we eventually get to reclaim it. 518 */ 519 } else { 520 /* Reclaim freed block(s) off tail */ 521 while (*p & 1) { 522 p = (ber_len_t *) ((char *) p - p[-1]); 523 } 524 sh->sh_last = p; 525 VGMEMP_TRIM(sh, sh->sh_base, sh->sh_last - sh->sh_base); 526 } 527 528 } else { 529 int size_shift, order_size; 530 struct slab_object *so; 531 unsigned long diff; 532 int i, inserted = 0, order = -1; 533 534 size_shift = size + sizeof(ber_len_t) - 1; 535 do { 536 order++; 537 } while (size_shift >>= 1); 538 539 for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) { 540 order_size = 1 << (i+1); 541 diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1); 542 sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7))); 543 if (diff == ((diff>>1)<<1)) { 544 if (!(sh->sh_map[i-order_start][(diff+1)>>3] & 545 (1<<((diff+1)&0x7)))) { 546 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]); 547 while (so) { 548 if ((char*)so->so_ptr == (char*)tmpp) { 549 LDAP_LIST_REMOVE( so, so_link ); 550 } else if ((char*)so->so_ptr == 551 (char*)tmpp + order_size) { 552 LDAP_LIST_REMOVE(so, so_link); 553 break; 554 } 555 so = LDAP_LIST_NEXT(so, so_link); 556 } 557 if (so) { 558 if (i < sh->sh_maxorder) { 559 inserted = 1; 560 so->so_ptr = tmpp; 561 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], 562 so, so_link); 563 } 564 continue; 565 } else { 566 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) { 567 slap_replenish_sopool(sh); 568 } 569 so = LDAP_LIST_FIRST(&sh->sh_sopool); 570 LDAP_LIST_REMOVE(so, so_link); 571 so->so_ptr = tmpp; 572 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start], 573 so, so_link); 574 break; 575 576 Debug(LDAP_DEBUG_TRACE, 
"slap_sl_free: " 577 "free object not found while bit is clear.\n", 578 0, 0, 0); 579 assert(so != NULL); 580 581 } 582 } else { 583 if (!inserted) { 584 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) { 585 slap_replenish_sopool(sh); 586 } 587 so = LDAP_LIST_FIRST(&sh->sh_sopool); 588 LDAP_LIST_REMOVE(so, so_link); 589 so->so_ptr = tmpp; 590 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start], 591 so, so_link); 592 } 593 break; 594 } 595 } else { 596 if (!(sh->sh_map[i-order_start][(diff-1)>>3] & 597 (1<<((diff-1)&0x7)))) { 598 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]); 599 while (so) { 600 if ((char*)so->so_ptr == (char*)tmpp) { 601 LDAP_LIST_REMOVE(so, so_link); 602 } else if ((char*)tmpp == (char *)so->so_ptr + order_size) { 603 LDAP_LIST_REMOVE(so, so_link); 604 tmpp = so->so_ptr; 605 break; 606 } 607 so = LDAP_LIST_NEXT(so, so_link); 608 } 609 if (so) { 610 if (i < sh->sh_maxorder) { 611 inserted = 1; 612 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link); 613 continue; 614 } 615 } else { 616 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) { 617 slap_replenish_sopool(sh); 618 } 619 so = LDAP_LIST_FIRST(&sh->sh_sopool); 620 LDAP_LIST_REMOVE(so, so_link); 621 so->so_ptr = tmpp; 622 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start], 623 so, so_link); 624 break; 625 626 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: " 627 "free object not found while bit is clear.\n", 628 0, 0, 0 ); 629 assert(so != NULL); 630 631 } 632 } else { 633 if ( !inserted ) { 634 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) { 635 slap_replenish_sopool(sh); 636 } 637 so = LDAP_LIST_FIRST(&sh->sh_sopool); 638 LDAP_LIST_REMOVE(so, so_link); 639 so->so_ptr = tmpp; 640 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start], 641 so, so_link); 642 } 643 break; 644 } 645 } 646 } 647 } 648} 649 650/* 651 * Return the memory context of the current thread if the given block of 652 * memory belongs to it, otherwise return NULL. 
653 */ 654void * 655slap_sl_context( void *ptr ) 656{ 657 void *memctx; 658 struct slab_heap *sh; 659 660 if ( slapMode & SLAP_TOOL_MODE ) return NULL; 661 662 sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx); 663 if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) { 664 return sh; 665 } 666 return NULL; 667} 668 669static struct slab_object * 670slap_replenish_sopool( 671 struct slab_heap* sh 672) 673{ 674 struct slab_object *so_block; 675 int i; 676 677 so_block = (struct slab_object *)ch_malloc( 678 SLAP_SLAB_SOBLOCK * sizeof(struct slab_object)); 679 680 if ( so_block == NULL ) { 681 return NULL; 682 } 683 684 so_block[0].so_blockhead = 1; 685 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link); 686 for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) { 687 so_block[i].so_blockhead = 0; 688 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link ); 689 } 690 691 return so_block; 692} 693 694#ifdef SLAPD_UNUSED 695static void 696print_slheap(int level, void *ctx) 697{ 698 struct slab_heap *sh = ctx; 699 struct slab_object *so; 700 int i, j, once = 0; 701 702 if (!ctx) { 703 Debug(level, "NULL memctx\n", 0, 0, 0); 704 return; 705 } 706 707 Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder, 0, 0); 708 709 for (i = order_start; i <= sh->sh_maxorder; i++) { 710 once = 0; 711 Debug(level, "order=%d\n", i, 0, 0); 712 for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) { 713 Debug(level, "%02x ", sh->sh_map[i-order_start][j], 0, 0); 714 once = 1; 715 } 716 if (!once) { 717 Debug(level, "%02x ", sh->sh_map[i-order_start][0], 0, 0); 718 } 719 Debug(level, "\n", 0, 0, 0); 720 Debug(level, "free list:\n", 0, 0, 0); 721 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]); 722 while (so) { 723 Debug(level, "%p\n", so->so_ptr, 0, 0); 724 so = LDAP_LIST_NEXT(so, so_link); 725 } 726 } 727} 728#endif 729