// mt_allocator.cc  (revision 225736)

// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
291558Srgrimes 301558Srgrimes// 311558Srgrimes// ISO C++ 14882: 321558Srgrimes// 331558Srgrimes 341558Srgrimes#include <bits/c++config.h> 351558Srgrimes#include <ext/concurrence.h> 361558Srgrimes#include <ext/mt_allocator.h> 37114589Sobrien#include <cstring> 381558Srgrimes 3923247Swollmannamespace 401558Srgrimes{ 411558Srgrimes#ifdef __GTHREADS 421558Srgrimes struct __freelist 431558Srgrimes { 441558Srgrimes typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record; 451558Srgrimes _Thread_record* _M_thread_freelist; 46114589Sobrien _Thread_record* _M_thread_freelist_array; 4737671Scharnier size_t _M_max_threads; 48114589Sobrien __gthread_key_t _M_key; 49114589Sobrien 501558Srgrimes ~__freelist() 511558Srgrimes { 521558Srgrimes if (_M_thread_freelist_array) 531558Srgrimes { 5437671Scharnier __gthread_key_delete(_M_key); 551558Srgrimes ::operator delete(static_cast<void*>(_M_thread_freelist_array)); 561558Srgrimes } 571558Srgrimes } 581558Srgrimes }; 591558Srgrimes 601558Srgrimes // Ensure freelist is constructed first. 611558Srgrimes static __freelist freelist; 621558Srgrimes __gnu_cxx::__mutex freelist_mutex; 631558Srgrimes 641558Srgrimes static void 651558Srgrimes _M_destroy_thread_key(void* __id) 661558Srgrimes { 671558Srgrimes // Return this thread id record to the front of thread_freelist. 
681558Srgrimes __gnu_cxx::__scoped_lock sentry(freelist_mutex); 6923247Swollman size_t _M_id = reinterpret_cast<size_t>(__id); 70109733Smaxim 71109731Smaxim typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record; 72109733Smaxim _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1]; 73109733Smaxim __tr->_M_next = freelist._M_thread_freelist; 7423247Swollman freelist._M_thread_freelist = __tr; 75109733Smaxim } 76109733Smaxim#endif 77109733Smaxim} // anonymous namespace 78109733Smaxim 79109733Smaxim_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx) 80109733Smaxim 81109733Smaxim void 82109733Smaxim __pool<false>::_M_destroy() throw() 83109733Smaxim { 84109733Smaxim if (_M_init && !_M_options._M_force_new) 85109733Smaxim { 8623247Swollman for (size_t __n = 0; __n < _M_bin_size; ++__n) 8723247Swollman { 8823247Swollman _Bin_record& __bin = _M_bin[__n]; 8927508Swollman while (__bin._M_address) 9023247Swollman { 9123247Swollman _Block_address* __tmp = __bin._M_address->_M_next; 9223247Swollman ::operator delete(__bin._M_address->_M_initial); 9323247Swollman __bin._M_address = __tmp; 9423247Swollman } 9523247Swollman ::operator delete(__bin._M_first); 9623247Swollman } 9723247Swollman ::operator delete(_M_bin); 9823247Swollman ::operator delete(_M_binmap); 9999446Smaxim } 100111765Smdodd } 101111765Smdodd 102111765Smdodd void 103112729Smdodd __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) 10427533Sbde { 10527533Sbde // Round up to power of 2 and figure out which bin to use. 10699447Smaxim const size_t __which = _M_binmap[__bytes]; 10799447Smaxim _Bin_record& __bin = _M_bin[__which]; 1081558Srgrimes 10956342Sbillf char* __c = __p - _M_get_align(); 110109731Smaxim _Block_record* __block = reinterpret_cast<_Block_record*>(__c); 1111558Srgrimes 1121558Srgrimes // Single threaded application - return to global pool. 
1131558Srgrimes __block->_M_next = __bin._M_first[0]; 1141558Srgrimes __bin._M_first[0] = __block; 1151558Srgrimes } 1161558Srgrimes 1171558Srgrimes char* 1181558Srgrimes __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id) 1191558Srgrimes { 12020540Sfenner // Round up to power of 2 and figure out which bin to use. 12120540Sfenner const size_t __which = _M_binmap[__bytes]; 12220540Sfenner _Bin_record& __bin = _M_bin[__which]; 12320540Sfenner const _Tune& __options = _M_get_options(); 12420540Sfenner const size_t __bin_size = (__options._M_min_bin << __which) 12520540Sfenner + __options._M_align; 12620540Sfenner size_t __block_count = __options._M_chunk_size - sizeof(_Block_address); 12720540Sfenner __block_count /= __bin_size; 12820540Sfenner 12920540Sfenner // Get a new block dynamically, set it up for use. 13020540Sfenner void* __v = ::operator new(__options._M_chunk_size); 13120540Sfenner _Block_address* __address = static_cast<_Block_address*>(__v); 13220540Sfenner __address->_M_initial = __v; 13322417Sdanny __address->_M_next = __bin._M_address; 13455505Sshin __bin._M_address = __address; 13555505Sshin 13655505Sshin char* __c = static_cast<char*>(__v) + sizeof(_Block_address); 13755505Sshin _Block_record* __block = reinterpret_cast<_Block_record*>(__c); 13855505Sshin __bin._M_first[__thread_id] = __block; 13974029Sru while (--__block_count > 0) 14077119Sphk { 141104339Sdd __c += __bin_size; 142109731Smaxim __block->_M_next = reinterpret_cast<_Block_record*>(__c); 143110009Smdodd __block = __block->_M_next; 144111765Smdodd } 1451558Srgrimes __block->_M_next = NULL; 1461558Srgrimes 1471558Srgrimes __block = __bin._M_first[__thread_id]; 1481558Srgrimes __bin._M_first[__thread_id] = __block->_M_next; 1491558Srgrimes 1501558Srgrimes // NB: For alignment reasons, we can't use the first _M_align 1511558Srgrimes // bytes, even when sizeof(_Block_record) < _M_align. 
1521558Srgrimes return reinterpret_cast<char*>(__block) + __options._M_align; 1531558Srgrimes } 1541558Srgrimes 15579403Smjacob void 1561558Srgrimes __pool<false>::_M_initialize() 157112531Sbde { 1581558Srgrimes // _M_force_new must not change after the first allocate(), which 159109731Smaxim // in turn calls this method, so if it's false, it's false forever 160109733Smaxim // and we don't need to return here ever again. 1611558Srgrimes if (_M_options._M_force_new) 1621558Srgrimes { 1631558Srgrimes _M_init = true; 16442337Simp return; 1651558Srgrimes } 16623295Simp 167111765Smdodd // Create the bins. 168111765Smdodd // Calculate the number of bins required based on _M_max_bytes. 169111765Smdodd // _M_bin_size is statically-initialized to one. 170117548Smaxim size_t __bin_size = _M_options._M_min_bin; 1711558Srgrimes while (_M_options._M_max_bytes > __bin_size) 1721558Srgrimes { 173109733Smaxim __bin_size <<= 1; 1741558Srgrimes ++_M_bin_size; 1751558Srgrimes } 1761558Srgrimes 1771558Srgrimes // Setup the bin map for quick lookup of the relevant bin. 17838549Sdillon const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type); 1791558Srgrimes _M_binmap = static_cast<_Binmap_type*>(::operator new(__j)); 1801558Srgrimes _Binmap_type* __bp = _M_binmap; 1811558Srgrimes _Binmap_type __bin_max = _M_options._M_min_bin; 1821558Srgrimes _Binmap_type __bint = 0; 1831558Srgrimes for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct) 1841558Srgrimes { 18527508Swollman if (__ct > __bin_max) 1861558Srgrimes { 18727533Sbde __bin_max <<= 1; 1883792Ssef ++__bint; 18927533Sbde } 1903792Ssef *__bp++ = __bint; 19123247Swollman } 19223247Swollman 19323247Swollman // Initialize _M_bin and its members. 
19427533Sbde void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size); 19523247Swollman _M_bin = static_cast<_Bin_record*>(__v); 19623247Swollman for (size_t __n = 0; __n < _M_bin_size; ++__n) 197111765Smdodd { 19823247Swollman _Bin_record& __bin = _M_bin[__n]; 19923247Swollman __v = ::operator new(sizeof(_Block_record*)); 20036378Sfenner __bin._M_first = static_cast<_Block_record**>(__v); 20123247Swollman __bin._M_first[0] = NULL; 20223247Swollman __bin._M_address = NULL; 20327533Sbde } 20423247Swollman _M_init = true; 20537671Scharnier } 2061558Srgrimes 20723247Swollman 2081558Srgrimes#ifdef __GTHREADS 2091558Srgrimes void 21023247Swollman __pool<true>::_M_destroy() throw() 2111558Srgrimes { 212111932Sseanc if (_M_init && !_M_options._M_force_new) 21393035Sobrien { 214109733Smaxim if (__gthread_active_p()) 21593035Sobrien { 216109731Smaxim for (size_t __n = 0; __n < _M_bin_size; ++__n) 21793035Sobrien { 21893035Sobrien _Bin_record& __bin = _M_bin[__n]; 21993035Sobrien while (__bin._M_address) 220109731Smaxim { 22199447Smaxim _Block_address* __tmp = __bin._M_address->_M_next; 222110054Smdodd ::operator delete(__bin._M_address->_M_initial); 223109733Smaxim __bin._M_address = __tmp; 22493035Sobrien } 22593035Sobrien ::operator delete(__bin._M_first); 22693035Sobrien ::operator delete(__bin._M_free); 227109733Smaxim ::operator delete(__bin._M_used); 228109733Smaxim ::operator delete(__bin._M_mutex); 22993035Sobrien } 230117548Smaxim } 231109733Smaxim else 23293035Sobrien { 23393035Sobrien for (size_t __n = 0; __n < _M_bin_size; ++__n) 2341558Srgrimes { 23599447Smaxim _Bin_record& __bin = _M_bin[__n]; 2361558Srgrimes while (__bin._M_address) 237109733Smaxim { 23893035Sobrien _Block_address* __tmp = __bin._M_address->_M_next; 239111932Sseanc ::operator delete(__bin._M_address->_M_initial); 24055505Sshin __bin._M_address = __tmp; 24193035Sobrien } 24255505Sshin ::operator delete(__bin._M_first); 2431558Srgrimes } 24417474Sfenner } 24517474Sfenner ::operator 
delete(_M_bin); 24617474Sfenner ::operator delete(_M_binmap); 24717474Sfenner } 24817474Sfenner } 24923247Swollman 25023247Swollman void 25117474Sfenner __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) 25217474Sfenner { 25323295Simp // Round up to power of 2 and figure out which bin to use. 25417474Sfenner const size_t __which = _M_binmap[__bytes]; 255109731Smaxim const _Bin_record& __bin = _M_bin[__which]; 2563792Ssef 257109731Smaxim // Know __p not null, assume valid block. 25874029Sru char* __c = __p - _M_get_align(); 259112985Smdodd _Block_record* __block = reinterpret_cast<_Block_record*>(__c); 26074029Sru if (__gthread_active_p()) 26155505Sshin { 26274029Sru // Calculate the number of records to remove from our freelist: 26355505Sshin // in order to avoid too much contention we wait until the 26474029Sru // number of records is "high enough". 26574029Sru const size_t __thread_id = _M_get_thread_id(); 26655505Sshin const _Tune& __options = _M_get_options(); 2671558Srgrimes const size_t __limit = (100 * (_M_bin_size - __which) 26877119Sphk * __options._M_freelist_headroom); 26977119Sphk 27077119Sphk size_t __remove = __bin._M_free[__thread_id]; 27122417Sdanny __remove *= __options._M_freelist_headroom; 27222417Sdanny 27322417Sdanny // NB: We assume that reads of _Atomic_words are atomic. 2741558Srgrimes const size_t __max_threads = __options._M_max_threads + 1; 27523247Swollman _Atomic_word* const __reclaimed_base = 27623247Swollman reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads); 27723247Swollman const _Atomic_word __reclaimed = __reclaimed_base[__thread_id]; 27823247Swollman const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed; 27923247Swollman 28023247Swollman // NB: For performance sake we don't resync every time, in order 2811558Srgrimes // to spare atomic ops. 
Note that if __reclaimed increased by, 282109731Smaxim // say, 1024, since the last sync, it means that the other 283109731Smaxim // threads executed the atomic in the else below at least the 284109731Smaxim // same number of times (at least, because _M_reserve_block may 285109731Smaxim // have decreased the counter), therefore one more cannot hurt. 2861558Srgrimes if (__reclaimed > 1024) 2871558Srgrimes { 2881558Srgrimes __bin._M_used[__thread_id] -= __reclaimed; 2891558Srgrimes __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed); 29038549Sdillon } 29123247Swollman 29223247Swollman if (__remove >= __net_used) 2931558Srgrimes __remove -= __net_used; 2941558Srgrimes else 2951558Srgrimes __remove = 0; 2961558Srgrimes if (__remove > __limit && __remove > __bin._M_free[__thread_id]) 297111287Sru { 298111287Sru _Block_record* __first = __bin._M_first[__thread_id]; 299111287Sru _Block_record* __tmp = __first; 300111287Sru __remove /= __options._M_freelist_headroom; 301111287Sru const size_t __removed = __remove; 302111287Sru while (--__remove > 0) 303111287Sru __tmp = __tmp->_M_next; 3041558Srgrimes __bin._M_first[__thread_id] = __tmp->_M_next; 30593638Smaxim __bin._M_free[__thread_id] -= __removed; 30693638Smaxim 30793638Smaxim __gthread_mutex_lock(__bin._M_mutex); 30893638Smaxim __tmp->_M_next = __bin._M_first[0]; 30993638Smaxim __bin._M_first[0] = __first; 31093638Smaxim __bin._M_free[0] += __removed; 31193638Smaxim __gthread_mutex_unlock(__bin._M_mutex); 31293638Smaxim } 31393638Smaxim 31438549Sdillon // Return this block to our list and update counters and 3151558Srgrimes // owner id as needed. 
316111287Sru if (__block->_M_thread_id == __thread_id) 317111287Sru --__bin._M_used[__thread_id]; 318111287Sru else 31920540Sfenner __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1); 3201558Srgrimes 32123247Swollman __block->_M_next = __bin._M_first[__thread_id]; 32223247Swollman __bin._M_first[__thread_id] = __block; 32393638Smaxim 32493638Smaxim ++__bin._M_free[__thread_id]; 32546643Smckay } 32623251Simp else 32723251Simp { 32823251Simp // Not using threads, so single threaded application - return 32923247Swollman // to global pool. 3301558Srgrimes __block->_M_next = __bin._M_first[0]; 331111287Sru __bin._M_first[0] = __block; 332111765Smdodd } 333111765Smdodd } 334111765Smdodd 335111765Smdodd char* 336111765Smdodd __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id) 337111765Smdodd { 338111765Smdodd // Round up to power of 2 and figure out which bin to use. 339111765Smdodd const size_t __which = _M_binmap[__bytes]; 340111765Smdodd const _Tune& __options = _M_get_options(); 341111765Smdodd const size_t __bin_size = ((__options._M_min_bin << __which) 342111765Smdodd + __options._M_align); 343111765Smdodd size_t __block_count = __options._M_chunk_size - sizeof(_Block_address); 344111765Smdodd __block_count /= __bin_size; 34520540Sfenner 34674029Sru // Are we using threads? 34774029Sru // - Yes, check if there are free blocks on the global 348109732Smaxim // list. If so, grab up to __block_count blocks in one 34993638Smaxim // lock and change ownership. If the global list is 35074029Sru // empty, we allocate a new chunk and add those blocks 35174029Sru // directly to our own freelist (with us as owner). 35274029Sru // - No, all operations are made directly to global pool 0 3531558Srgrimes // no need to lock or change ownership but check for free 3541558Srgrimes // blocks on global list (and if not add new ones) and 3551558Srgrimes // get the first one. 
356104339Sdd _Bin_record& __bin = _M_bin[__which]; 357104339Sdd _Block_record* __block = NULL; 358104339Sdd if (__gthread_active_p()) 359111287Sru { 360111287Sru // Resync the _M_used counters. 361111287Sru const size_t __max_threads = __options._M_max_threads + 1; 362111287Sru _Atomic_word* const __reclaimed_base = 363111287Sru reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads); 364111287Sru const _Atomic_word __reclaimed = __reclaimed_base[__thread_id]; 365111287Sru __bin._M_used[__thread_id] -= __reclaimed; 366111287Sru __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed); 367111287Sru 368111287Sru __gthread_mutex_lock(__bin._M_mutex); 369111287Sru if (__bin._M_first[0] == NULL) 370111287Sru { 371111287Sru void* __v = ::operator new(__options._M_chunk_size); 3721558Srgrimes _Block_address* __address = static_cast<_Block_address*>(__v); 3731558Srgrimes __address->_M_initial = __v; 374110054Smdodd __address->_M_next = __bin._M_address; 375109733Smaxim __bin._M_address = __address; 37617724Sfenner __gthread_mutex_unlock(__bin._M_mutex); 37717724Sfenner 37817724Sfenner // No need to hold the lock when we are adding a whole 3791558Srgrimes // chunk to our own list. 3801558Srgrimes char* __c = static_cast<char*>(__v) + sizeof(_Block_address); 3811558Srgrimes __block = reinterpret_cast<_Block_record*>(__c); 3821558Srgrimes __bin._M_free[__thread_id] = __block_count; 3831558Srgrimes __bin._M_first[__thread_id] = __block; 3841558Srgrimes while (--__block_count > 0) 3851558Srgrimes { 3861558Srgrimes __c += __bin_size; 3871558Srgrimes __block->_M_next = reinterpret_cast<_Block_record*>(__c); 388111287Sru __block = __block->_M_next; 389111287Sru } 390111287Sru __block->_M_next = NULL; 3911558Srgrimes } 39223247Swollman else 39399447Smaxim { 39493638Smaxim // Is the number of required blocks greater than or equal 39593638Smaxim // to the number that can be provided by the global free 396109734Smaxim // list? 
397109734Smaxim __bin._M_first[__thread_id] = __bin._M_first[0]; 398109734Smaxim if (__block_count >= __bin._M_free[0]) 399109734Smaxim { 400109734Smaxim __bin._M_free[__thread_id] = __bin._M_free[0]; 401109734Smaxim __bin._M_free[0] = 0; 40223247Swollman __bin._M_first[0] = NULL; 4031558Srgrimes } 404111287Sru else 405111287Sru { 406111287Sru __bin._M_free[__thread_id] = __block_count; 407111287Sru __bin._M_free[0] -= __block_count; 408111287Sru __block = __bin._M_first[0]; 409111287Sru while (--__block_count > 0) 410111287Sru __block = __block->_M_next; 41142337Simp __bin._M_first[0] = __block->_M_next; 41255996Sbillf __block->_M_next = NULL; 41356342Sbillf } 41456342Sbillf __gthread_mutex_unlock(__bin._M_mutex); 41555996Sbillf } 41655996Sbillf } 41756342Sbillf else 41856342Sbillf { 41956342Sbillf void* __v = ::operator new(__options._M_chunk_size); 42056342Sbillf _Block_address* __address = static_cast<_Block_address*>(__v); 42155996Sbillf __address->_M_initial = __v; 4221558Srgrimes __address->_M_next = __bin._M_address; 4231558Srgrimes __bin._M_address = __address; 4241558Srgrimes 425109731Smaxim char* __c = static_cast<char*>(__v) + sizeof(_Block_address); 426109731Smaxim __block = reinterpret_cast<_Block_record*>(__c); 427109731Smaxim __bin._M_first[0] = __block; 428109731Smaxim while (--__block_count > 0) 429109731Smaxim { 430109731Smaxim __c += __bin_size; 431109731Smaxim __block->_M_next = reinterpret_cast<_Block_record*>(__c); 4321558Srgrimes __block = __block->_M_next; 43337671Scharnier } 4341558Srgrimes __block->_M_next = NULL; 43523247Swollman } 4361558Srgrimes 43723247Swollman __block = __bin._M_first[__thread_id]; 43837671Scharnier __bin._M_first[__thread_id] = __block->_M_next; 43923247Swollman 4401558Srgrimes if (__gthread_active_p()) 441112568Smdodd { 442112568Smdodd __block->_M_thread_id = __thread_id; 443112568Smdodd --__bin._M_free[__thread_id]; 444111765Smdodd ++__bin._M_used[__thread_id]; 445111765Smdodd } 446112568Smdodd 447111765Smdodd // 
NB: For alignment reasons, we can't use the first _M_align 448111765Smdodd // bytes, even when sizeof(_Block_record) < _M_align. 449112568Smdodd return reinterpret_cast<char*>(__block) + __options._M_align; 450112568Smdodd } 451111765Smdodd 452111765Smdodd void 453112568Smdodd __pool<true>::_M_initialize() 454111765Smdodd { 455111765Smdodd // _M_force_new must not change after the first allocate(), 456112568Smdodd // which in turn calls this method, so if it's false, it's false 457112568Smdodd // forever and we don't need to return here ever again. 458112568Smdodd if (_M_options._M_force_new) 459112568Smdodd { 460111765Smdodd _M_init = true; 461117549Smaxim return; 462109734Smaxim } 463117548Smaxim 464117548Smaxim // Create the bins. 465109734Smaxim // Calculate the number of bins required based on _M_max_bytes. 466112531Sbde // _M_bin_size is statically-initialized to one. 467109734Smaxim size_t __bin_size = _M_options._M_min_bin; 468117548Smaxim while (_M_options._M_max_bytes > __bin_size) 469117549Smaxim { 470110054Smdodd __bin_size <<= 1; 471110054Smdodd ++_M_bin_size; 472110054Smdodd } 47342337Simp 474111932Sseanc // Setup the bin map for quick lookup of the relevant bin. 475111932Sseanc const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type); 476111932Sseanc _M_binmap = static_cast<_Binmap_type*>(::operator new(__j)); 47742337Simp _Binmap_type* __bp = _M_binmap; 47842337Simp _Binmap_type __bin_max = _M_options._M_min_bin; 47942337Simp _Binmap_type __bint = 0; 48042337Simp for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct) 48142337Simp { 48293638Smaxim if (__ct > __bin_max) 48342337Simp { 484111932Sseanc __bin_max <<= 1; 485111932Sseanc ++__bint; 48699447Smaxim } 48793638Smaxim *__bp++ = __bint; 488111932Sseanc } 489111932Sseanc 49093638Smaxim // Initialize _M_bin and its members. 
49193638Smaxim void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size); 49242337Simp _M_bin = static_cast<_Bin_record*>(__v); 49342337Simp 49442337Simp // If __gthread_active_p() create and initialize the list of 495111932Sseanc // free thread ids. Single threaded applications use thread id 0 49642337Simp // directly and have no need for this. 49742337Simp if (__gthread_active_p()) 49842337Simp { 49979403Smjacob { 50079403Smjacob __gnu_cxx::__scoped_lock sentry(freelist_mutex); 5011558Srgrimes 50279403Smjacob if (!freelist._M_thread_freelist_array 50323247Swollman || freelist._M_max_threads < _M_options._M_max_threads) 5041558Srgrimes { 50523247Swollman const size_t __k = sizeof(_Thread_record) 50623247Swollman * _M_options._M_max_threads; 50723247Swollman __v = ::operator new(__k); 50823247Swollman _M_thread_freelist = static_cast<_Thread_record*>(__v); 50993638Smaxim 51023247Swollman // NOTE! The first assignable thread id is 1 since the 511111932Sseanc // global pool uses id 0 51293638Smaxim size_t __i; 51323247Swollman for (__i = 1; __i < _M_options._M_max_threads; ++__i) 5141558Srgrimes { 51531956Simp _Thread_record& __tr = _M_thread_freelist[__i - 1]; 5161558Srgrimes __tr._M_next = &_M_thread_freelist[__i]; 5171558Srgrimes __tr._M_id = __i; 5181558Srgrimes } 51923247Swollman 52023247Swollman // Set last record. 5211558Srgrimes _M_thread_freelist[__i - 1]._M_next = NULL; 52223247Swollman _M_thread_freelist[__i - 1]._M_id = __i; 52393638Smaxim 52493638Smaxim if (!freelist._M_thread_freelist_array) 52523247Swollman { 52623247Swollman // Initialize per thread key to hold pointer to 52793638Smaxim // _M_thread_freelist. 
52893638Smaxim __gthread_key_create(&freelist._M_key, 52923247Swollman ::_M_destroy_thread_key); 530112568Smdodd freelist._M_thread_freelist = _M_thread_freelist; 5311558Srgrimes } 53223247Swollman else 5331558Srgrimes { 534112568Smdodd _Thread_record* _M_old_freelist 5351558Srgrimes = freelist._M_thread_freelist; 5361558Srgrimes _Thread_record* _M_old_array 5371558Srgrimes = freelist._M_thread_freelist_array; 5381558Srgrimes freelist._M_thread_freelist 53917474Sfenner = &_M_thread_freelist[_M_old_freelist - _M_old_array]; 54017474Sfenner while (_M_old_freelist) 54123247Swollman { 5421558Srgrimes size_t next_id; 5431558Srgrimes if (_M_old_freelist->_M_next) 5441558Srgrimes next_id = _M_old_freelist->_M_next - _M_old_array; 5451558Srgrimes else 5461558Srgrimes next_id = freelist._M_max_threads; 5471558Srgrimes _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next 5481558Srgrimes = &_M_thread_freelist[next_id]; 5491558Srgrimes _M_old_freelist = _M_old_freelist->_M_next; 55055505Sshin } 55155505Sshin ::operator delete(static_cast<void*>(_M_old_array)); 55255505Sshin } 55355505Sshin freelist._M_thread_freelist_array = _M_thread_freelist; 55455505Sshin freelist._M_max_threads = _M_options._M_max_threads; 55555505Sshin } 55655505Sshin } 55768905Skris 55855505Sshin const size_t __max_threads = _M_options._M_max_threads + 1; 55955505Sshin for (size_t __n = 0; __n < _M_bin_size; ++__n) 56093638Smaxim { 56193638Smaxim _Bin_record& __bin = _M_bin[__n]; 56255505Sshin __v = ::operator new(sizeof(_Block_record*) * __max_threads); 56355505Sshin std::memset(__v, 0, sizeof(_Block_record*) * __max_threads); 5641558Srgrimes __bin._M_first = static_cast<_Block_record**>(__v); 56555505Sshin 56655505Sshin __bin._M_address = NULL; 56755505Sshin 56868905Skris __v = ::operator new(sizeof(size_t) * __max_threads); 56955505Sshin std::memset(__v, 0, sizeof(size_t) * __max_threads); 57055505Sshin 57193638Smaxim __bin._M_free = static_cast<size_t*>(__v); 57293638Smaxim 57355505Sshin __v = 
::operator new(sizeof(size_t) * __max_threads 57455505Sshin + sizeof(_Atomic_word) * __max_threads); 57555505Sshin std::memset(__v, 0, (sizeof(size_t) * __max_threads 57655505Sshin + sizeof(_Atomic_word) * __max_threads)); 57755505Sshin __bin._M_used = static_cast<size_t*>(__v); 57855505Sshin 579109731Smaxim __v = ::operator new(sizeof(__gthread_mutex_t)); 580109731Smaxim __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v); 581109731Smaxim 582109731Smaxim#ifdef __GTHREAD_MUTEX_INIT 583109731Smaxim { 584109731Smaxim // Do not copy a POSIX/gthr mutex once in use. 585109731Smaxim __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT; 586109731Smaxim *__bin._M_mutex = __tmp; 587109731Smaxim } 588109731Smaxim#else 589109731Smaxim { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); } 590109731Smaxim#endif 591109731Smaxim } 592109731Smaxim } 593109731Smaxim else 594109731Smaxim { 595109731Smaxim for (size_t __n = 0; __n < _M_bin_size; ++__n) 596109731Smaxim { 597109731Smaxim _Bin_record& __bin = _M_bin[__n]; 598111932Sseanc __v = ::operator new(sizeof(_Block_record*)); 599109731Smaxim __bin._M_first = static_cast<_Block_record**>(__v); 600109731Smaxim __bin._M_first[0] = NULL; 6011558Srgrimes __bin._M_address = NULL; 6021558Srgrimes } 6031558Srgrimes } 60436378Sfenner _M_init = true; 6051558Srgrimes } 60636378Sfenner 6071558Srgrimes size_t 60836378Sfenner __pool<true>::_M_get_thread_id() 6091558Srgrimes { 61023247Swollman // If we have thread support and it's active we check the thread 61123247Swollman // key value and return its id or if it's not set we take the 6121558Srgrimes // first record from _M_thread_freelist and sets the key and 61323247Swollman // returns it's id. 
61493638Smaxim if (__gthread_active_p()) 6151558Srgrimes { 6161558Srgrimes void* v = __gthread_getspecific(freelist._M_key); 6171558Srgrimes size_t _M_id = (size_t)v; 61874029Sru if (_M_id == 0) 61974029Sru { 62074029Sru { 62174029Sru __gnu_cxx::__scoped_lock sentry(freelist_mutex); 62274029Sru if (freelist._M_thread_freelist) 62374029Sru { 62420540Sfenner _M_id = freelist._M_thread_freelist->_M_id; 62520540Sfenner freelist._M_thread_freelist 62620540Sfenner = freelist._M_thread_freelist->_M_next; 62723247Swollman } 62820540Sfenner } 62920540Sfenner 63020540Sfenner __gthread_setspecific(freelist._M_key, (void*)_M_id); 63174029Sru } 63274029Sru return _M_id >= _M_options._M_max_threads ? 0 : _M_id; 63323247Swollman } 63420540Sfenner 63520540Sfenner // Otherwise (no thread support or inactive) all requests are 63620540Sfenner // served from the global pool 0. 63720540Sfenner return 0; 63820540Sfenner } 63923247Swollman 64020540Sfenner // XXX GLIBCXX_ABI Deprecated 64120540Sfenner void 64236378Sfenner __pool<true>::_M_destroy_thread_key(void*) { } 64336378Sfenner 64436378Sfenner // XXX GLIBCXX_ABI Deprecated 64536378Sfenner void 64636378Sfenner __pool<true>::_M_initialize(__destroy_handler) 64736378Sfenner { 64820540Sfenner // _M_force_new must not change after the first allocate(), 6491558Srgrimes // which in turn calls this method, so if it's false, it's false 6501558Srgrimes // forever and we don't need to return here ever again. 6511558Srgrimes if (_M_options._M_force_new) 6521558Srgrimes { 65323247Swollman _M_init = true; 65423247Swollman return; 6551558Srgrimes } 65699447Smaxim 65799447Smaxim // Create the bins. 65899447Smaxim // Calculate the number of bins required based on _M_max_bytes. 65999447Smaxim // _M_bin_size is statically-initialized to one. 
66099447Smaxim size_t __bin_size = _M_options._M_min_bin; 66199447Smaxim while (_M_options._M_max_bytes > __bin_size) 6621558Srgrimes { 6631558Srgrimes __bin_size <<= 1; 664109733Smaxim ++_M_bin_size; 66579018Srwatson } 66679018Srwatson 66779018Srwatson // Setup the bin map for quick lookup of the relevant bin. 66842337Simp const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type); 66942337Simp _M_binmap = static_cast<_Binmap_type*>(::operator new(__j)); 67042337Simp _Binmap_type* __bp = _M_binmap; 67142337Simp _Binmap_type __bin_max = _M_options._M_min_bin; 67242337Simp _Binmap_type __bint = 0; 67342337Simp for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct) 67442337Simp { 6751558Srgrimes if (__ct > __bin_max) 6761558Srgrimes { 67720280Sbde __bin_max <<= 1; 67827354Ssef ++__bint; 67927354Ssef } 68020280Sbde *__bp++ = __bint; 68127354Ssef } 68220205Spst 68320195Ssef // Initialize _M_bin and its members. 68427354Ssef void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size); 68527354Ssef _M_bin = static_cast<_Bin_record*>(__v); 68627354Ssef 68727354Ssef // If __gthread_active_p() create and initialize the list of 68827354Ssef // free thread ids. Single threaded applications use thread id 0 68927354Ssef // directly and have no need for this. 69027354Ssef if (__gthread_active_p()) 69120195Ssef { 69223385Simp { 69320195Ssef __gnu_cxx::__scoped_lock sentry(freelist_mutex); 69420195Ssef 69556342Sbillf if (!freelist._M_thread_freelist_array 69656342Sbillf || freelist._M_max_threads < _M_options._M_max_threads) 69756342Sbillf { 69856342Sbillf const size_t __k = sizeof(_Thread_record) 69956342Sbillf * _M_options._M_max_threads; 70056342Sbillf __v = ::operator new(__k); 70136378Sfenner _M_thread_freelist = static_cast<_Thread_record*>(__v); 70236378Sfenner 70336378Sfenner // NOTE! 
The first assignable thread id is 1 since the 70436378Sfenner // global pool uses id 0 70536378Sfenner size_t __i; 70636378Sfenner for (__i = 1; __i < _M_options._M_max_threads; ++__i) 70736378Sfenner { 70836378Sfenner _Thread_record& __tr = _M_thread_freelist[__i - 1]; 709117548Smaxim __tr._M_next = &_M_thread_freelist[__i]; 71036378Sfenner __tr._M_id = __i; 71119864Ssef } 71219864Ssef 71319864Ssef // Set last record. 71419864Ssef _M_thread_freelist[__i - 1]._M_next = NULL; 71519864Ssef _M_thread_freelist[__i - 1]._M_id = __i; 71619864Ssef 71789349Sru if (!freelist._M_thread_freelist_array) 71889349Sru { 71989349Sru // Initialize per thread key to hold pointer to 72089349Sru // _M_thread_freelist. 72189349Sru __gthread_key_create(&freelist._M_key, 72289349Sru ::_M_destroy_thread_key); 72389349Sru freelist._M_thread_freelist = _M_thread_freelist; 72489349Sru } 72589349Sru else 7261558Srgrimes { 72736378Sfenner _Thread_record* _M_old_freelist 72836378Sfenner = freelist._M_thread_freelist; 72936378Sfenner _Thread_record* _M_old_array 73036378Sfenner = freelist._M_thread_freelist_array; 73138549Sdillon freelist._M_thread_freelist 73238549Sdillon = &_M_thread_freelist[_M_old_freelist - _M_old_array]; 73336378Sfenner while (_M_old_freelist) 7341558Srgrimes { 735109733Smaxim size_t next_id; 73627533Sbde if (_M_old_freelist->_M_next) 737109733Smaxim next_id = _M_old_freelist->_M_next - _M_old_array; 73836378Sfenner else 739109733Smaxim next_id = freelist._M_max_threads; 7401558Srgrimes _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next 74120280Sbde = &_M_thread_freelist[next_id]; 742111932Sseanc _M_old_freelist = _M_old_freelist->_M_next; 743103146Snectar } 74436378Sfenner ::operator delete(static_cast<void*>(_M_old_array)); 74536378Sfenner } 74636378Sfenner freelist._M_thread_freelist_array = _M_thread_freelist; 74736378Sfenner freelist._M_max_threads = _M_options._M_max_threads; 74836378Sfenner } 74936378Sfenner } 75036378Sfenner 75136378Sfenner const size_t 
__max_threads = _M_options._M_max_threads + 1; 7521558Srgrimes for (size_t __n = 0; __n < _M_bin_size; ++__n) 75337671Scharnier { 75436378Sfenner _Bin_record& __bin = _M_bin[__n]; 75536378Sfenner __v = ::operator new(sizeof(_Block_record*) * __max_threads); 75636378Sfenner std::memset(__v, 0, sizeof(_Block_record*) * __max_threads); 75736378Sfenner __bin._M_first = static_cast<_Block_record**>(__v); 75836378Sfenner 75936378Sfenner __bin._M_address = NULL; 76046643Smckay 76146643Smckay __v = ::operator new(sizeof(size_t) * __max_threads); 76236378Sfenner std::memset(__v, 0, sizeof(size_t) * __max_threads); 763111932Sseanc __bin._M_free = static_cast<size_t*>(__v); 76436378Sfenner 76536378Sfenner __v = ::operator new(sizeof(size_t) * __max_threads + 76636378Sfenner sizeof(_Atomic_word) * __max_threads); 76736378Sfenner std::memset(__v, 0, (sizeof(size_t) * __max_threads 76836378Sfenner + sizeof(_Atomic_word) * __max_threads)); 76936378Sfenner __bin._M_used = static_cast<size_t*>(__v); 77036378Sfenner 77136378Sfenner __v = ::operator new(sizeof(__gthread_mutex_t)); 77236378Sfenner __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v); 77337671Scharnier 7741558Srgrimes#ifdef __GTHREAD_MUTEX_INIT 77536378Sfenner { 77636378Sfenner // Do not copy a POSIX/gthr mutex once in use. 
77736378Sfenner __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT; 77836378Sfenner *__bin._M_mutex = __tmp; 779111932Sseanc } 78036713Sjb#else 781103229Speter { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); } 782111932Sseanc#endif 78336713Sjb } 78436378Sfenner } 785111932Sseanc else 78636378Sfenner { 787111932Sseanc for (size_t __n = 0; __n < _M_bin_size; ++__n) 78836378Sfenner { 789111932Sseanc _Bin_record& __bin = _M_bin[__n]; 790111932Sseanc __v = ::operator new(sizeof(_Block_record*)); 791111932Sseanc __bin._M_first = static_cast<_Block_record**>(__v); 79236378Sfenner __bin._M_first[0] = NULL; 7931558Srgrimes __bin._M_address = NULL; 79446643Smckay } 79536378Sfenner } 79636378Sfenner _M_init = true; 79736378Sfenner } 79836378Sfenner#endif 79936378Sfenner 80036378Sfenner // Instantiations. 80146643Smckay template class __mt_alloc<char>; 80236378Sfenner template class __mt_alloc<wchar_t>; 80336378Sfenner 80436378Sfenner_GLIBCXX_END_NAMESPACE 80536378Sfenner