// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
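  // Bookkeeping for the pool of free thread ids shared by all
  // __pool<true> instances: a thread picks up an id the first time it
  // allocates (see __pool<true>::_M_get_thread_id below) and returns
  // it through the __gthread key destructor when it exits.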
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*	_M_thread_freelist;
    _Thread_record*	_M_thread_freelist_array;
    size_t		_M_max_threads;
    __gthread_key_t	_M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
	{
	  __gthread_key_delete(_M_key);
	  ::operator delete(static_cast<void*>(_M_thread_freelist_array));
	}
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  __gnu_cxx::__mutex freelist_mutex;

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::__scoped_lock sentry(freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);
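    // Thread ids are 1-based (id 0 is reserved for the global pool),
    // so id - 1 indexes the freelist array.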

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
	for (size_t __n = 0; __n < _M_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _M_bin[__n];
	    while (__bin._M_address)
	      {
		_Block_address* __tmp = __bin._M_address->_M_next;
		::operator delete(__bin._M_address->_M_initial);
		__bin._M_address = __tmp;
	      }
	    ::operator delete(__bin._M_first);
	  }
	::operator delete(_M_bin);
	::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to a power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);

    // Single-threaded application: return to the global pool.
    __block_record->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block_record;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to a power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
			       + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

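    // Illustrative layout, assuming the default 8-byte alignment, a
    // 4096-byte chunk and a 64-bit target: for bin 0, __bin_size is
    // (8 << 0) + 8 = 16 bytes, so a chunk carves into one
    // _Block_address header followed by roughly (4096 - 16) / 16 = 255
    // blocks.  Each block begins with its _Block_record link; user
    // data starts _M_align bytes in (see the note at the return below).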
    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block_record;
    while (--__block_count > 0)
      {
	__c += __bin_size;
	__block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
	__block_record = __block_record->_M_next;
      }
    __block_record->_M_next = NULL;

    __block_record = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block_record->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block_record) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
	_M_init = true;
	return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
	__bin_size <<= 1;
	++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
	if (__ct > __bin_max)
	  {
	    __bin_max <<= 1;
	    ++__bint;
	  }
	*__bp++ = __bint;
      }
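
    // Worked example, assuming the defaults _M_min_bin == 8 and
    // _M_max_bytes == 128: requests of 0-8 bytes map to bin 0, 9-16
    // to bin 1, 17-32 to bin 2, 33-64 to bin 3, and 65-128 to bin 4.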

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
	_Bin_record& __bin = _M_bin[__n];
	__v = ::operator new(sizeof(_Block_record*));
	__bin._M_first = static_cast<_Block_record**>(__v);
	__bin._M_first[0] = NULL;
	__bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
	if (__gthread_active_p())
	  {
	    for (size_t __n = 0; __n < _M_bin_size; ++__n)
	      {
		_Bin_record& __bin = _M_bin[__n];
		while (__bin._M_address)
		  {
		    _Block_address* __tmp = __bin._M_address->_M_next;
		    ::operator delete(__bin._M_address->_M_initial);
		    __bin._M_address = __tmp;
		  }
		::operator delete(__bin._M_first);
		::operator delete(__bin._M_free);
		::operator delete(__bin._M_used);
		::operator delete(__bin._M_mutex);
	      }
	  }
	else
	  {
	    for (size_t __n = 0; __n < _M_bin_size; ++__n)
	      {
		_Bin_record& __bin = _M_bin[__n];
		while (__bin._M_address)
		  {
		    _Block_address* __tmp = __bin._M_address->_M_next;
		    ::operator delete(__bin._M_address->_M_initial);
		    __bin._M_address = __tmp;
		  }
		::operator delete(__bin._M_first);
	      }
	  }
	::operator delete(_M_bin);
	::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to a power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block_record = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
	// Calculate the number of records to remove from our freelist:
	// in order to avoid too much contention we wait until the
	// number of records is "high enough".
	const size_t __thread_id = _M_get_thread_id();
	const _Tune& __options = _M_get_options();
	const size_t __limit = (100 * (_M_bin_size - __which)
				* __options._M_freelist_headroom);

	size_t __remove = __bin._M_free[__thread_id];
	__remove *= __options._M_freelist_headroom;

	// NB: We assume that reads of _Atomic_words are atomic.
	const size_t __max_threads = __options._M_max_threads + 1;
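	// _M_initialize lays out _M_used as one allocation holding
	// __max_threads size_t counters followed by __max_threads
	// _Atomic_word "reclaimed" counters; the cast below addresses
	// that second array.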
	_Atomic_word* const __reclaimed_base =
	  reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
	const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
	const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

	// NB: For performance's sake we don't resync every time, in
	// order to spare atomic ops.  Note that if __reclaimed increased
	// by, say, 1024 since the last sync, it means that the other
	// threads executed the atomic in the else below at least the
	// same number of times (at least, because _M_reserve_block may
	// have decreased the counter), therefore one more cannot hurt.
	if (__reclaimed > 1024)
	  {
	    __bin._M_used[__thread_id] -= __reclaimed;
	    __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
	  }

	if (__remove >= __net_used)
	  __remove -= __net_used;
	else
	  __remove = 0;
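
	// Illustrative numbers, assuming the default
	// _M_freelist_headroom of 10: for the last bin the limit is
	// 100 * 1 * 10 = 1000.  With 200 blocks free and 150 in net
	// use, __remove is 200 * 10 - 150 = 1850; that exceeds both
	// bounds, so 1850 / 10 = 185 blocks are spliced back to the
	// global freelist below.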
	if (__remove > __limit && __remove > __bin._M_free[__thread_id])
	  {
	    _Block_record* __first = __bin._M_first[__thread_id];
	    _Block_record* __tmp = __first;
	    __remove /= __options._M_freelist_headroom;
	    const size_t __removed = __remove;
	    while (--__remove > 0)
	      __tmp = __tmp->_M_next;
	    __bin._M_first[__thread_id] = __tmp->_M_next;
	    __bin._M_free[__thread_id] -= __removed;

	    __gthread_mutex_lock(__bin._M_mutex);
	    __tmp->_M_next = __bin._M_first[0];
	    __bin._M_first[0] = __first;
	    __bin._M_free[0] += __removed;
	    __gthread_mutex_unlock(__bin._M_mutex);
	  }

	// Return this block to our list and update counters and
	// owner id as needed.
	if (__block_record->_M_thread_id == __thread_id)
	  --__bin._M_used[__thread_id];
	else
	  __atomic_add(&__reclaimed_base[__block_record->_M_thread_id], 1);

	__block_record->_M_next = __bin._M_first[__thread_id];
	__bin._M_first[__thread_id] = __block_record;

	++__bin._M_free[__thread_id];
      }
    else
      {
	// Not using threads, so this is a single-threaded application:
	// return to the global pool.
	__block_record->_M_next = __bin._M_first[0];
	__bin._M_first[0] = __block_record;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to a power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
			       + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes: check if there are free blocks on the global list.  If
    //   so, grab up to __block_count blocks in one lock and change
    //   ownership.  If the global list is empty, allocate a new chunk
    //   and add those blocks directly to our own freelist (with us as
    //   owner).
    // - No: all operations are made directly on global pool 0; there
    //   is no need to lock or change ownership, but we still check for
    //   free blocks on the global list (adding new ones if needed) and
    //   take the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block_record = NULL;
    if (__gthread_active_p())
      {
	// Resync the _M_used counters.
	const size_t __max_threads = __options._M_max_threads + 1;
	_Atomic_word* const __reclaimed_base =
	  reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
	const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
	__bin._M_used[__thread_id] -= __reclaimed;
	__atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

	__gthread_mutex_lock(__bin._M_mutex);
	if (__bin._M_first[0] == NULL)
	  {
	    void* __v = ::operator new(__options._M_chunk_size);
	    _Block_address* __address = static_cast<_Block_address*>(__v);
	    __address->_M_initial = __v;
	    __address->_M_next = __bin._M_address;
	    __bin._M_address = __address;
	    __gthread_mutex_unlock(__bin._M_mutex);

	    // No need to hold the lock when we are adding a whole
	    // chunk to our own list.
	    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
	    __block_record = reinterpret_cast<_Block_record*>(__c);
	    __bin._M_free[__thread_id] = __block_count;
	    __bin._M_first[__thread_id] = __block_record;
	    while (--__block_count > 0)
	      {
		__c += __bin_size;
		__block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
		__block_record = __block_record->_M_next;
	      }
	    __block_record->_M_next = NULL;
	  }
	else
	  {
	    // Is the number of required blocks greater than or equal
	    // to the number that can be provided by the global free
	    // list?
	    __bin._M_first[__thread_id] = __bin._M_first[0];
	    if (__block_count >= __bin._M_free[0])
	      {
		__bin._M_free[__thread_id] = __bin._M_free[0];
		__bin._M_free[0] = 0;
		__bin._M_first[0] = NULL;
	      }
	    else
	      {
		__bin._M_free[__thread_id] = __block_count;
		__bin._M_free[0] -= __block_count;
		__block_record = __bin._M_first[0];
		while (--__block_count > 0)
		  __block_record = __block_record->_M_next;
		__bin._M_first[0] = __block_record->_M_next;
		__block_record->_M_next = NULL;
	      }
	    __gthread_mutex_unlock(__bin._M_mutex);
	  }
      }
    else
      {
	void* __v = ::operator new(__options._M_chunk_size);
	_Block_address* __address = static_cast<_Block_address*>(__v);
	__address->_M_initial = __v;
	__address->_M_next = __bin._M_address;
	__bin._M_address = __address;

	char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
	__block_record = reinterpret_cast<_Block_record*>(__c);
	__bin._M_first[0] = __block_record;
	while (--__block_count > 0)
	  {
	    __c += __bin_size;
	    __block_record->_M_next = reinterpret_cast<_Block_record*>(__c);
	    __block_record = __block_record->_M_next;
	  }
	__block_record->_M_next = NULL;
      }

    __block_record = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block_record->_M_next;

    if (__gthread_active_p())
      {
	__block_record->_M_thread_id = __thread_id;
	--__bin._M_free[__thread_id];
	++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block_record) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
	_M_init = true;
	return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
	__bin_size <<= 1;
	++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
	if (__ct > __bin_max)
	  {
	    __bin_max <<= 1;
	    ++__bint;
	  }
	*__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids.  Single-threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
	{
	  __gnu_cxx::__scoped_lock sentry(freelist_mutex);

	  if (!freelist._M_thread_freelist_array
	      || freelist._M_max_threads < _M_options._M_max_threads)
	    {
	      const size_t __k = sizeof(_Thread_record)
				 * _M_options._M_max_threads;
	      __v = ::operator new(__k);
	      _M_thread_freelist = static_cast<_Thread_record*>(__v);

	      // NOTE! The first assignable thread id is 1 since the
	      // global pool uses id 0.
	      size_t __i;
	      for (__i = 1; __i < _M_options._M_max_threads; ++__i)
		{
		  _Thread_record& __tr = _M_thread_freelist[__i - 1];
		  __tr._M_next = &_M_thread_freelist[__i];
		  __tr._M_id = __i;
		}

	      // Set last record.
	      _M_thread_freelist[__i - 1]._M_next = NULL;
	      _M_thread_freelist[__i - 1]._M_id = __i;

	      if (!freelist._M_thread_freelist_array)
		{
		  // Initialize the per-thread key to hold a pointer to
		  // _M_thread_freelist.
		  __gthread_key_create(&freelist._M_key,
				       ::_M_destroy_thread_key);
		  freelist._M_thread_freelist = _M_thread_freelist;
		}
	      else
		{
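		  // Growing the array: free records keep their ids and
		  // relative order in the new array; the last old free
		  // record is linked to index freelist._M_max_threads,
		  // the first of the newly created ids, so the fresh
		  // records follow at the end of the chain.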
		  _Thread_record* _M_old_freelist
		    = freelist._M_thread_freelist;
		  _Thread_record* _M_old_array
		    = freelist._M_thread_freelist_array;
		  freelist._M_thread_freelist
		    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
		  while (_M_old_freelist)
		    {
		      size_t next_id;
		      if (_M_old_freelist->_M_next)
			next_id = _M_old_freelist->_M_next - _M_old_array;
		      else
			next_id = freelist._M_max_threads;
		      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
			= &_M_thread_freelist[next_id];
		      _M_old_freelist = _M_old_freelist->_M_next;
		    }
		  ::operator delete(static_cast<void*>(_M_old_array));
		}
	      freelist._M_thread_freelist_array = _M_thread_freelist;
	      freelist._M_max_threads = _M_options._M_max_threads;
	    }
	}

	const size_t __max_threads = _M_options._M_max_threads + 1;
	for (size_t __n = 0; __n < _M_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _M_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*) * __max_threads);
	    std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
	    __bin._M_first = static_cast<_Block_record**>(__v);

	    __bin._M_address = NULL;

	    __v = ::operator new(sizeof(size_t) * __max_threads);
	    std::memset(__v, 0, sizeof(size_t) * __max_threads);

	    __bin._M_free = static_cast<size_t*>(__v);

	    __v = ::operator new(sizeof(size_t) * __max_threads
				 + sizeof(_Atomic_word) * __max_threads);
	    std::memset(__v, 0, (sizeof(size_t) * __max_threads
				 + sizeof(_Atomic_word) * __max_threads));
	    __bin._M_used = static_cast<size_t*>(__v);

	    __v = ::operator new(sizeof(__gthread_mutex_t));
	    __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
	    {
	      // Do not copy a POSIX/gthr mutex once in use.
	      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
	      *__bin._M_mutex = __tmp;
	    }
#else
	    { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
	  }
      }
    else
      {
	for (size_t __n = 0; __n < _M_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _M_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*));
	    __bin._M_first = static_cast<_Block_record**>(__v);
	    __bin._M_first[0] = NULL;
	    __bin._M_address = NULL;
	  }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread-key
    // value and return its id; if the key is not yet set, take the
    // first record from the freelist, set the key, and return the new
    // id.
    if (__gthread_active_p())
      {
	void* v = __gthread_getspecific(freelist._M_key);
	size_t _M_id = (size_t)v;
	if (_M_id == 0)
	  {
	    {
	      __gnu_cxx::__scoped_lock sentry(freelist_mutex);
	      if (freelist._M_thread_freelist)
		{
		  _M_id = freelist._M_thread_freelist->_M_id;
		  freelist._M_thread_freelist
		    = freelist._M_thread_freelist->_M_next;
		}
	    }

	    __gthread_setspecific(freelist._M_key, (void*)_M_id);
	  }
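
	// An id of 0 (freelist exhausted) or one at or past
	// _M_max_threads falls back to the shared pool 0.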
	return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support, or threads inactive) all requests
    // are served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
	_M_init = true;
	return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
	__bin_size <<= 1;
	++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
	if (__ct > __bin_max)
	  {
	    __bin_max <<= 1;
	    ++__bint;
	  }
	*__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids.  Single-threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
	{
	  __gnu_cxx::__scoped_lock sentry(freelist_mutex);

	  if (!freelist._M_thread_freelist_array
	      || freelist._M_max_threads < _M_options._M_max_threads)
	    {
	      const size_t __k = sizeof(_Thread_record)
				 * _M_options._M_max_threads;
	      __v = ::operator new(__k);
	      _M_thread_freelist = static_cast<_Thread_record*>(__v);

	      // NOTE! The first assignable thread id is 1 since the
	      // global pool uses id 0.
	      size_t __i;
	      for (__i = 1; __i < _M_options._M_max_threads; ++__i)
		{
		  _Thread_record& __tr = _M_thread_freelist[__i - 1];
		  __tr._M_next = &_M_thread_freelist[__i];
		  __tr._M_id = __i;
		}

	      // Set last record.
	      _M_thread_freelist[__i - 1]._M_next = NULL;
	      _M_thread_freelist[__i - 1]._M_id = __i;

	      if (!freelist._M_thread_freelist_array)
		{
		  // Initialize the per-thread key to hold a pointer to
		  // _M_thread_freelist.
		  __gthread_key_create(&freelist._M_key,
				       ::_M_destroy_thread_key);
		  freelist._M_thread_freelist = _M_thread_freelist;
		}
	      else
		{
		  _Thread_record* _M_old_freelist
		    = freelist._M_thread_freelist;
		  _Thread_record* _M_old_array
		    = freelist._M_thread_freelist_array;
		  freelist._M_thread_freelist
		    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
		  while (_M_old_freelist)
		    {
		      size_t next_id;
		      if (_M_old_freelist->_M_next)
			next_id = _M_old_freelist->_M_next - _M_old_array;
		      else
			next_id = freelist._M_max_threads;
		      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
			= &_M_thread_freelist[next_id];
		      _M_old_freelist = _M_old_freelist->_M_next;
		    }
		  ::operator delete(static_cast<void*>(_M_old_array));
		}
	      freelist._M_thread_freelist_array = _M_thread_freelist;
	      freelist._M_max_threads = _M_options._M_max_threads;
	    }
	}

	const size_t __max_threads = _M_options._M_max_threads + 1;
	for (size_t __n = 0; __n < _M_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _M_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*) * __max_threads);
	    std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
	    __bin._M_first = static_cast<_Block_record**>(__v);

	    __bin._M_address = NULL;

	    __v = ::operator new(sizeof(size_t) * __max_threads);
	    std::memset(__v, 0, sizeof(size_t) * __max_threads);
	    __bin._M_free = static_cast<size_t*>(__v);

	    __v = ::operator new(sizeof(size_t) * __max_threads +
				 sizeof(_Atomic_word) * __max_threads);
	    std::memset(__v, 0, (sizeof(size_t) * __max_threads
				 + sizeof(_Atomic_word) * __max_threads));
	    __bin._M_used = static_cast<size_t*>(__v);

	    __v = ::operator new(sizeof(__gthread_mutex_t));
	    __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
	    {
	      // Do not copy a POSIX/gthr mutex once in use.
	      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
	      *__bin._M_mutex = __tmp;
	    }
#else
	    { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
	  }
      }
    else
      {
	for (size_t __n = 0; __n < _M_bin_size; ++__n)
	  {
	    _Bin_record& __bin = _M_bin[__n];
	    __v = ::operator new(sizeof(_Block_record*));
	    __bin._M_first = static_cast<_Block_record**>(__v);
	    __bin._M_first[0] = NULL;
	    __bin._M_address = NULL;
	  }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE